repo (stringlengths 1-152, ⌀) | file (stringlengths 14-221) | code (stringlengths 501-25k) | file_length (int64 501-25k) | avg_line_length (float64 20-99.5) | max_line_length (int64 21-134) | extension_type (stringclasses, 2 values)
---|---|---|---|---|---|---
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
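// Set up one output row pointer per row of the 6-row tile; when mr < 6 the
// unused pointers alias the previous row so their stores stay in bounds.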
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
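// Initialize all 6 rows of accumulators from the packed bias at the start of w.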
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
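// Main loop: consume 2 columns of each A row per iteration (ld64), broadcasting
// each lane with vdupq_lane_f32 and accumulating with fused multiply-add.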
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
}
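// Remainder: when kc holds an odd number of floats, one column remains; process
// it with a single broadcast load per row.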
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t va4 = vld1q_dup_f32(a4);
const float32x4_t va5 = vld1q_dup_f32(a5);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
p -= 6 * sizeof(void*);
} while (p != 0);
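// Clamp the accumulators to the [min, max] range from params.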
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
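// Store the 6x8 output tile; partial tiles (nc < 8) are written 4/2/1 columns at a time.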
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,940 | 36.214286 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
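// Inner loop: broadcast one element of each A row per step (loadsplat) and
// accumulate with the relaxed-SIMD fused multiply-add builtin.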
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
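// Clamp with the relaxed min/max builtins; vmin/vmax were splatted from params
// before the tile loop.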
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,271 | 34.937984 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
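// Main loop: load 4 columns of each A row at once, splat each of the 4 lanes in
// turn via shuffles, and consume 32 packed weights per iteration.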
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
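// Remainder: fall back to one broadcast load per row for the last kc % 4 columns.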
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,082 | 42.942623 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmrelaxedsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
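// Inner loop: this non-FMA variant accumulates with separate multiply and add;
// only the final clamp below uses the relaxed min/max builtins.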
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,231 | 34.782946 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmrelaxedsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
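// Main loop: 4-way splat variant without FMA; each lane of the 4-column A load
// is shuffled to all lanes and multiplied against 8 packed weights, then added.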
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,898 | 42.439891 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmsimd-arm-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
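// Inner loop: baseline WAsm SIMD (no relaxed ops); multiply-add is expressed as
// wasm_f32x4_mul followed by wasm_f32x4_add, with wasm_f32x4_min/max clamps below.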
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
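    // Apply the output clamp with the NaN-propagating f32x4 min/max (the "arm"-tuned flavor).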
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
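      // Partial-width output: store 4, then 2, then 1 remaining columns, shifting lanes down between steps.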
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,796 | 33.096899 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmsimd-arm-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
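      // Splat inner loop: load 4 K elements per A row, splat each lane with a shuffle,
      // and multiply it against the matching pair of B vectors.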
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
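      // Remainder loop for kc not a multiple of 4: fall back to single-element broadcast loads.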
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,463 | 41.251366 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmsimd-x86-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
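    // The "x86"-tuned variant clamps with f32x4.pmin/pmax (pseudo-min/max), intended to lower to plain minps/maxps.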
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,820 | 33.189922 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-wasmsimd-x86-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,487 | 41.31694 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-relu-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
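        // Relaxed-SIMD multiply-add: the engine may evaluate this fused or unfused.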
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
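    // ReLU via signed-integer max: negative floats have the sign bit set, so they compare
    // below +0 as int32 and are replaced by zero.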
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,150 | 32.405738 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-relu-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
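      // Fewer than 8 output columns remain: store the leftover columns in groups of
      // 4, 2, and 1, shifting the surviving accumulator lanes down after each
      // partial store so the next store always reads from lane 0.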
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,961 | 41.505682 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-relu-wasmsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
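      // "loadsplat" strategy: each iteration consumes one K element per row,
      // broadcasting it to all four lanes and multiplying it against two 4-wide
      // columns of packed weights, so no separate remainder loop is needed.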
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,103 | 32.213115 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-relu-wasmsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
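      // "splat" strategy: load four consecutive K elements per row, then broadcast
      // each lane with a shuffle, so the main loop advances K by 4 at a time; the
      // scalar loop further down handles the tail when kc is not a multiple of 4.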
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,770 | 40.963068 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
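      // __builtin_wasm_relaxed_madd_f32x4 maps to the relaxed-SIMD f32x4.relaxed_madd
      // instruction; whether the multiply-add is fused is implementation-defined, so
      // low-order bits of the result may differ between engines.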
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,472 | 31.350649 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
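        // Store the two low lanes, then shift the two high lanes down so a possible
        // single-column store below picks up the correct element from lane 0.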
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,283 | 41.135693 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-wasmsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
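      // The indirection buffer advanced by ks bytes (6 pointers per iteration of the
      // ks loop); rewind it so the same A pointers are reused for the next block of
      // output columns.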
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,425 | 31.147186 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-wasmsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
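    // The packed weights begin with the 8 per-channel bias values; they seed the
    // accumulators for row 0 and are copied to the remaining rows before the K loop.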
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
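    // Store the 6x8 output tile, rows written from c5 down to c0 and each advanced by
    // cn_stride; partial tiles (nc < 8) fall through to the 4/2/1-column paths below.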
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
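      // Rewind the indirection array by ks bytes so the same A row pointers are reused
      // for the next 8-column block of output.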
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-minmax-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
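// 6x8 indirect GEMM (IGEMM) micro-kernel: accumulates a 6-row x 8-column f32 output tile
// from an indirection array of A row pointers and packed weights w, then applies the
// min/max clamp. The "s4" layout rotates the A vectors by one lane between the four
// sub-steps of each k-block; this variant uses WebAssembly Relaxed SIMD fused multiply-add.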
void xnn_f32_igemm_minmax_ukernel_6x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
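      // Main loop: consume 4 k elements per iteration. After each of the four
      // multiply-accumulate sub-steps the A vectors are rotated left by one lane
      // (shuffle 1, 2, 3, 0) to line up with the s4-packed weights.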
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
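      // Remainder (1-3 k elements): the A loads may read past the end of a row (XNN_OOB_READS).
      // Lanes whose packed weight is zero padding are cleared with andnot(a, b == 0) so that
      // out-of-bounds A values cannot corrupt the accumulators.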
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
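    // Fused output clamp: max(vmin, acc) then min(vmax, acc) using relaxed min/max
    // (NaN and signed-zero handling may vary between engines, which is fine for finite bounds).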
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
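// Same 6x8s4 IGEMM tile as the FMA variant above, but the inner products use separate
// wasm_f32x4_mul + wasm_f32x4_add; only the final clamp uses relaxed min/max.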
void xnn_f32_igemm_minmax_ukernel_6x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
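// Baseline WebAssembly SIMD version of the 6x8s4 IGEMM micro-kernel (no relaxed ops).
// The "_arm" suffix refers to the clamp sequence built from wasm_f32x4_min/max, which
// maps well to ARM targets.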
void xnn_f32_igemm_minmax_ukernel_6x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
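    // Clamp the accumulators to the [min, max] output range before storing.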
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
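      // Remaining columns (nc < 8): store 4, then 2, then 1 element, shifting the
      // surviving accumulator lanes down after each partial store.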
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,364 | 48.228111 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
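  // Output clamping bounds; wasm_v128_load64_splat duplicates the loaded 64 bits
  // into both halves of the vector so all four lanes hold the same bound.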
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
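      // a[] holds one indirection (input-row) pointer per output row; pointers that
      // reference the shared zero buffer are used as-is, without the a_offset adjustment.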
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
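      // Main loop: 4 k-elements per iteration. Each pass multiplies the current input
      // vectors by one block of packed weights, then rotates the inputs left by one lane
      // (the "s4" shift-by-four scheme) instead of broadcasting individual scalars.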
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
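        // Remainder (k < 4 floats): the packed weights are zero in unused positions, so
        // lanes where vb == 0 are cleared from va with andnot before the multiply-add.
        // This keeps out-of-bounds lanes (possibly Inf/NaN) from reaching the accumulators.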
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
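    // Clamp with pseudo-min/max; unlike the fully IEEE-propagating f32x4.min/max,
    // pmin/pmax lower to single minps/maxps instructions on x86.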
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,388 | 48.28341 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-relu-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
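        // Relaxed-SIMD multiply-add: computes va*vb + vacc; whether the multiply and add
        // are fused with a single rounding is implementation-defined under relaxed SIMD.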
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
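    // ReLU via signed-integer max: lanes with the sign bit set (negative floats)
    // compare below zero and are replaced with integer 0, i.e. +0.0f.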
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,970 | 48.930952 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-relu-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_6x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
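    // The packed weights begin with the per-output-channel bias (8 floats for this
    // 8-wide tile); every row's accumulators start from the same bias values.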
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
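      // Leftover k: each input pointer advances by the odd k bytes while the loads below
      // read a full 16 bytes, hence the XNN_OOB_READS annotation on this kernel.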
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
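    // Clamp the accumulators at zero (ReLU). Non-negative IEEE-754 floats order the same
    // way as their bit patterns, so a signed-integer max against zero clears every
    // negative lane without a floating-point comparison.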
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
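      // Rewind the indirection pointer array by ks bytes so the same input pointers are
      // replayed for the next group of output columns.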
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
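      // Fewer than 8 columns remain: store 4-, 2-, and 1-element chunks as indicated by
      // the low bits of nc, shifting the surviving lanes down after each partial store.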
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,671 | 48.219048 | 129 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
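  // Clamp the row pointers for tiles shorter than 6 rows: unused pointers alias the last
  // valid row, so their stores are harmlessly overwritten when that row is stored after
  // them (stores proceed from c5 down to c0).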
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
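      // A row pointer equal to the shared zero buffer stands in for padding and must not
      // have a_offset applied to it.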
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
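        // "s4" scheme: four sub-steps each multiply the current A lanes against one group
        // of packed B rows, then rotate the A vectors left by one lane so the next k
        // element lines up with its weights. The relaxed madd is allowed to fuse (or not
        // fuse) the multiply and add.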
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
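        // k is not a multiple of 4: the A vectors above were loaded past the end of the
        // row (XNN_OOB_READS), so zero out A lanes wherever the packed weight is zero to
        // keep the out-of-range elements from polluting the accumulators.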
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,292 | 48.859951 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8s4-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_6x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
v128_t vacc5x0123 = vacc0x0123;
v128_t vacc5x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
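        // Same s4 rotation scheme as the relaxed-SIMD kernel above, but with separate
        // wasm_f32x4_mul/wasm_f32x4_add since baseline WAsm SIMD has no fused multiply-add.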
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
p -= 6 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,993 | 48.125307 | 129 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-7x16-minmax-avx512f-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (7 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
const float* restrict a6 = a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const float*) ((uintptr_t) a6 + a_offset);
}
a += 7;
size_t k = kc;
do {
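        // Broadcast one A element per row and FMA it against 16 packed weights: each k
        // iteration performs a rank-1 update of the 7x16 accumulator tile.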
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
k -= sizeof(float);
} while (k != 0);
p -= 7 * sizeof(void*);
} while (p != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
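        // The masked stores write only the first nc floats of each row and leave the
        // remaining destination lanes untouched.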
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 7,421 | 35.029126 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-7x8-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (7 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
const float* restrict a6 = a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const float*) ((uintptr_t) a6 + a_offset);
}
a += 7;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
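        // Plain AVX has no fused multiply-add, so each update is a separate multiply and
        // add; the FMA3 variant below fuses them with _mm256_fmadd_ps.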
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
vacc6x01234567 = _mm256_add_ps(vacc6x01234567, _mm256_mul_ps(va6, vb01234567));
k -= sizeof(float);
} while (k != 0);
p -= 7 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
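      // Fewer than 8 columns remain: work on the low 128-bit halves, pulling the upper
      // half down with _mm256_extractf128_ps / _mm_movehl_ps as 4-, 2-, and 1-element
      // chunks are stored.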
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,778 | 33.027132 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-7x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (7 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
const float* restrict a6 = a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const float*) ((uintptr_t) a6 + a_offset);
}
a += 7;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
k -= sizeof(float);
} while (k != 0);
p -= 7 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
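      // nc < 8 remainder: narrow to 128-bit halves and store 4, 2, then 1 elements as needed.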
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,688 | 32.678295 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-8x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (8 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc7x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
const float* restrict a6 = a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const float*) ((uintptr_t) a6 + a_offset);
}
const float* restrict a7 = a[7];
assert(a7 != NULL);
if XNN_UNPREDICTABLE(a7 != zero) {
a7 = (const float*) ((uintptr_t) a7 + a_offset);
}
a += 8;
size_t k = kc;
do {
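        // Broadcast one element from each A row and fuse-multiply-add it with a 16-float slice of the packed weights.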
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
const __m512 va7 = _mm512_set1_ps(*a7);
vacc7x0123456789ABCDEF = _mm512_fmadd_ps(va7, vb0123456789ABCDEF, vacc7x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
a7 += 1;
k -= sizeof(float);
} while (k != 0);
p -= 8 * sizeof(void*);
} while (p != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_max_ps(vmin, vacc7x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_min_ps(vmax, vacc7x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 8,236 | 35.772321 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-8x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (8 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
__m256 vacc7x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
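      // Load this iteration's per-row input pointers from the indirection buffer; pointers equal to zero skip the input offset.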
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
const float* restrict a6 = a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const float*) ((uintptr_t) a6 + a_offset);
}
const float* restrict a7 = a[7];
assert(a7 != NULL);
if XNN_UNPREDICTABLE(a7 != zero) {
a7 = (const float*) ((uintptr_t) a7 + a_offset);
}
a += 8;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256 va7 = _mm256_broadcast_ss(a7);
a7 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);
k -= sizeof(float);
} while (k != 0);
p -= 8 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
vacc7x01234567 = _mm256_max_ps(vmin, vacc7x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
vacc7x01234567 = _mm256_min_ps(vmax, vacc7x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c7, vacc7x01234567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c7, vacc7x0123);
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c7 += 4;
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c7, vacc7x0123);
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c7 += 2;
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c7, vacc7x0123);
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,709 | 33.310954 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-neon-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__neon_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
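      // Alias unused kernel taps to i0 so they do not affect the maximum.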
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vmax018 = vmaxq_f32(vmaxq_f32(vi0, vi1), vi8);
const float32x4_t vmax23 = vmaxq_f32(vi2, vi3);
const float32x4_t vmax45 = vmaxq_f32(vi4, vi5);
const float32x4_t vmax67 = vmaxq_f32(vi6, vi7);
const float32x4_t vmax2345 = vmaxq_f32(vmax23, vmax45);
const float32x4_t vmax01678 = vmaxq_f32(vmax018, vmax67);
const float32x4_t vmax = vmaxq_f32(vmax2345, vmax01678);
const float32x4_t vout = vmaxq_f32(vminq_f32(vmax, voutput_max), voutput_min);
vst1q_f32(o, vout); o += 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vmax018 = vmaxq_f32(vmaxq_f32(vi0, vi1), vi8);
const float32x4_t vmax23 = vmaxq_f32(vi2, vi3);
const float32x4_t vmax45 = vmaxq_f32(vi4, vi5);
const float32x4_t vmax67 = vmaxq_f32(vi6, vi7);
const float32x4_t vmax2345 = vmaxq_f32(vmax23, vmax45);
const float32x4_t vmax01678 = vmaxq_f32(vmax018, vmax67);
const float32x4_t vmax = vmaxq_f32(vmax2345, vmax01678);
float32x4_t vout = vmaxq_f32(vminq_f32(vmax, voutput_max), voutput_min);
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(o, vout_lo); o += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(o, vout_lo, 0); o += 1;
}
}
}
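    // Subsequent passes of up to 8 taps combine new inputs with the partial maxima already written to the output.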
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 4; c -= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vo = vld1q_f32(o);
const float32x4_t vmax01 = vmaxq_f32(vmaxq_f32(vi0, vi1), vo);
const float32x4_t vmax23 = vmaxq_f32(vi2, vi3);
const float32x4_t vmax45 = vmaxq_f32(vi4, vi5);
const float32x4_t vmax67 = vmaxq_f32(vi6, vi7);
const float32x4_t vmax2345 = vmaxq_f32(vmax23, vmax45);
const float32x4_t vmax0167 = vmaxq_f32(vmax01, vmax67);
const float32x4_t vmax = vmaxq_f32(vmax2345, vmax0167);
const float32x4_t vout = vmaxq_f32(vminq_f32(vmax, voutput_max), voutput_min);
vst1q_f32(o, vout); o += 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vo = vld1q_f32(o);
const float32x4_t vmax01 = vmaxq_f32(vmaxq_f32(vi0, vi1), vo);
const float32x4_t vmax23 = vmaxq_f32(vi2, vi3);
const float32x4_t vmax45 = vmaxq_f32(vi4, vi5);
const float32x4_t vmax67 = vmaxq_f32(vi6, vi7);
const float32x4_t vmax2345 = vmaxq_f32(vmax23, vmax45);
const float32x4_t vmax0167 = vmaxq_f32(vmax01, vmax67);
const float32x4_t vmax = vmaxq_f32(vmax2345, vmax0167);
float32x4_t vout = vmaxq_f32(vminq_f32(vmax, voutput_max), voutput_min);
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(o, vout_lo); o += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(o, vout_lo, 0); o += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 8,221 | 34.593074 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/maxpool.h>
#include <xnnpack/math.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vmax01 = math_max_f32(vi0, vi1);
const float vmax23 = math_max_f32(vi2, vi3);
const float vmax45 = math_max_f32(vi4, vi5);
const float vmax67 = math_max_f32(vi6, vi7);
const float vmax018 = math_max_f32(vmax01, vi8);
const float vmax2345 = math_max_f32(vmax23, vmax45);
const float vmax01678 = math_max_f32(vmax018, vmax67);
float vout = math_max_f32(vmax2345, vmax01678);
vout = math_max_f32(vout, voutput_min);
vout = math_min_f32(vout, voutput_max);
*o++ = vout;
} while (--c != 0);
}
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
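        // The ninth input of this pass is the running maximum produced by the previous pass.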
const float vi8 = *o;
const float vmax01 = math_max_f32(vi0, vi1);
const float vmax23 = math_max_f32(vi2, vi3);
const float vmax45 = math_max_f32(vi4, vi5);
const float vmax67 = math_max_f32(vi6, vi7);
const float vmax018 = math_max_f32(vmax01, vi8);
const float vmax2345 = math_max_f32(vmax23, vmax45);
const float vmax01678 = math_max_f32(vmax018, vmax67);
float vout = math_max_f32(vmax2345, vmax01678);
vout = math_max_f32(vout, voutput_min);
vout = math_min_f32(vout, voutput_max);
*o++ = vout;
} while (--c != 0);
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 5,279 | 29.344828 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-sse-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/maxpool.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__sse_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vmax018 = _mm_max_ps(_mm_max_ps(vi0, vi1), vi8);
const __m128 vmax23 = _mm_max_ps(vi2, vi3);
const __m128 vmax45 = _mm_max_ps(vi4, vi5);
const __m128 vmax67 = _mm_max_ps(vi6, vi7);
const __m128 vmax2345 = _mm_max_ps(vmax23, vmax45);
const __m128 vmax01678 = _mm_max_ps(vmax018, vmax67);
const __m128 vmax = _mm_max_ps(vmax2345, vmax01678);
const __m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);
_mm_storeu_ps(o, vout);
o += 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vmax018 = _mm_max_ps(_mm_max_ps(vi0, vi1), vi8);
const __m128 vmax23 = _mm_max_ps(vi2, vi3);
const __m128 vmax45 = _mm_max_ps(vi4, vi5);
const __m128 vmax67 = _mm_max_ps(vi6, vi7);
const __m128 vmax2345 = _mm_max_ps(vmax23, vmax45);
const __m128 vmax01678 = _mm_max_ps(vmax018, vmax67);
const __m128 vmax = _mm_max_ps(vmax2345, vmax01678);
__m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);
if (c & 2) {
_mm_storel_pi((__m64*) o, vout);
o += 2;
vout = _mm_movehl_ps(vout, vout);
}
if (c & 1) {
_mm_store_ss(o, vout);
o += 1;
}
}
}
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 4; c -= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vo = _mm_loadu_ps(o);
const __m128 vmax01 = _mm_max_ps(_mm_max_ps(vi0, vi1), vo);
const __m128 vmax23 = _mm_max_ps(vi2, vi3);
const __m128 vmax45 = _mm_max_ps(vi4, vi5);
const __m128 vmax67 = _mm_max_ps(vi6, vi7);
const __m128 vmax2345 = _mm_max_ps(vmax23, vmax45);
const __m128 vmax0167 = _mm_max_ps(vmax01, vmax67);
const __m128 vmax = _mm_max_ps(vmax2345, vmax0167);
const __m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);
_mm_storeu_ps(o, vout);
o += 4;
}
if (c != 0) {
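        // 1-3 remaining channels: full 4-wide loads are safe because the kernel is declared XNN_OOB_READS.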
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vo = _mm_loadu_ps(o);
const __m128 vmax01 = _mm_max_ps(_mm_max_ps(vi0, vi1), vo);
const __m128 vmax23 = _mm_max_ps(vi2, vi3);
const __m128 vmax45 = _mm_max_ps(vi4, vi5);
const __m128 vmax67 = _mm_max_ps(vi6, vi7);
const __m128 vmax2345 = _mm_max_ps(vmax23, vmax45);
const __m128 vmax0167 = _mm_max_ps(vmax01, vmax67);
const __m128 vmax = _mm_max_ps(vmax2345, vmax0167);
__m128 vout = _mm_max_ps(_mm_min_ps(vmax, voutput_max), voutput_min);
if (c & 2) {
_mm_storel_pi((__m64*) o, vout);
o += 2;
vout = _mm_movehl_ps(vout, vout);
}
if (c & 1) {
_mm_store_ss(o, vout);
o += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 8,193 | 30.394636 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-wasm-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/maxpool.h>
#include <xnnpack/math.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__wasm_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vmax01 = __builtin_wasm_max_f32(vi0, vi1);
const float vmax23 = __builtin_wasm_max_f32(vi2, vi3);
const float vmax45 = __builtin_wasm_max_f32(vi4, vi5);
const float vmax67 = __builtin_wasm_max_f32(vi6, vi7);
const float vmax018 = __builtin_wasm_max_f32(vmax01, vi8);
const float vmax2345 = __builtin_wasm_max_f32(vmax23, vmax45);
const float vmax01678 = __builtin_wasm_max_f32(vmax018, vmax67);
float vout = __builtin_wasm_max_f32(vmax2345, vmax01678);
vout = __builtin_wasm_max_f32(vout, voutput_min);
vout = __builtin_wasm_min_f32(vout, voutput_max);
*o++ = vout;
} while (--c != 0);
}
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *o;
const float vmax01 = __builtin_wasm_max_f32(vi0, vi1);
const float vmax23 = __builtin_wasm_max_f32(vi2, vi3);
const float vmax45 = __builtin_wasm_max_f32(vi4, vi5);
const float vmax67 = __builtin_wasm_max_f32(vi6, vi7);
const float vmax018 = __builtin_wasm_max_f32(vmax01, vi8);
const float vmax2345 = __builtin_wasm_max_f32(vmax23, vmax45);
const float vmax01678 = __builtin_wasm_max_f32(vmax018, vmax67);
float vout = __builtin_wasm_max_f32(vmax2345, vmax01678);
vout = __builtin_wasm_max_f32(vout, voutput_min);
vout = __builtin_wasm_min_f32(vout, voutput_max);
*o++ = vout;
} while (--c != 0);
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 5,477 | 30.482759 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-wasmsimd-arm-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/maxpool.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_arm_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vmax018 = wasm_f32x4_max(wasm_f32x4_max(vi0, vi1), vi8);
const v128_t vmax23 = wasm_f32x4_max(vi2, vi3);
const v128_t vmax45 = wasm_f32x4_max(vi4, vi5);
const v128_t vmax67 = wasm_f32x4_max(vi6, vi7);
const v128_t vmax2345 = wasm_f32x4_max(vmax23, vmax45);
const v128_t vmax01678 = wasm_f32x4_max(vmax018, vmax67);
const v128_t vmax = wasm_f32x4_max(vmax2345, vmax01678);
const v128_t vout = wasm_f32x4_max(wasm_f32x4_min(vmax, voutput_max), voutput_min);
wasm_v128_store(o, vout);
o += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vmax018 = wasm_f32x4_max(wasm_f32x4_max(vi0, vi1), vi8);
const v128_t vmax23 = wasm_f32x4_max(vi2, vi3);
const v128_t vmax45 = wasm_f32x4_max(vi4, vi5);
const v128_t vmax67 = wasm_f32x4_max(vi6, vi7);
const v128_t vmax2345 = wasm_f32x4_max(vmax23, vmax45);
const v128_t vmax01678 = wasm_f32x4_max(vmax018, vmax67);
const v128_t vmax = wasm_f32x4_max(vmax2345, vmax01678);
v128_t vout = wasm_f32x4_max(wasm_f32x4_min(vmax, voutput_max), voutput_min);
if (c & 2) {
wasm_v128_store64_lane(o, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
o += 2;
}
if (c & 1) {
wasm_v128_store32_lane(o, vout, 0);
o += 1;
}
}
}
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vo = wasm_v128_load(o);
const v128_t vmax01 = wasm_f32x4_max(wasm_f32x4_max(vi0, vi1), vo);
const v128_t vmax23 = wasm_f32x4_max(vi2, vi3);
const v128_t vmax45 = wasm_f32x4_max(vi4, vi5);
const v128_t vmax67 = wasm_f32x4_max(vi6, vi7);
const v128_t vmax2345 = wasm_f32x4_max(vmax23, vmax45);
const v128_t vmax0167 = wasm_f32x4_max(vmax01, vmax67);
const v128_t vmax = wasm_f32x4_max(vmax2345, vmax0167);
const v128_t vout = wasm_f32x4_max(wasm_f32x4_min(vmax, voutput_max), voutput_min);
wasm_v128_store(o, vout);
o += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vo = wasm_v128_load(o);
const v128_t vmax01 = wasm_f32x4_max(wasm_f32x4_max(vi0, vi1), vo);
const v128_t vmax23 = wasm_f32x4_max(vi2, vi3);
const v128_t vmax45 = wasm_f32x4_max(vi4, vi5);
const v128_t vmax67 = wasm_f32x4_max(vi6, vi7);
const v128_t vmax2345 = wasm_f32x4_max(vmax23, vmax45);
const v128_t vmax0167 = wasm_f32x4_max(vmax01, vmax67);
const v128_t vmax = wasm_f32x4_max(vmax2345, vmax0167);
v128_t vout = wasm_f32x4_max(wasm_f32x4_min(vmax, voutput_max), voutput_min);
if (c & 2) {
wasm_v128_store64_lane(o, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
o += 2;
}
if (c & 1) {
wasm_v128_store32_lane(o, vout, 0);
o += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 8,527 | 31.67433 | 91 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-maxpool/f32-maxpool-9p8x-minmax-wasmsimd-x86-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/maxpool.h>
void xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_x86_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
do {
float* o = output;
{
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
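        // wasm_f32x4_pmax/pmin lower to single x86 MAXPS/MINPS instructions, unlike the IEEE-style max/min used in the ARM variant.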
const v128_t vmax01 = wasm_f32x4_pmax(vi1, vi0);
const v128_t vmax23 = wasm_f32x4_pmax(vi3, vi2);
const v128_t vmax45 = wasm_f32x4_pmax(vi5, vi4);
const v128_t vmax018 = wasm_f32x4_pmax(vi8, vmax01);
const v128_t vmax67 = wasm_f32x4_pmax(vi7, vi6);
const v128_t vmax2345 = wasm_f32x4_pmax(vmax45, vmax23);
const v128_t vmax01678 = wasm_f32x4_pmax(vmax67, vmax018);
const v128_t vmax = wasm_f32x4_pmax(vmax2345, vmax01678);
v128_t vout = wasm_f32x4_pmax(voutput_min, vmax);
vout = wasm_f32x4_pmin(voutput_max, vout);
wasm_v128_store(o, vout);
o += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vmax01 = wasm_f32x4_pmax(vi1, vi0);
const v128_t vmax23 = wasm_f32x4_pmax(vi3, vi2);
const v128_t vmax45 = wasm_f32x4_pmax(vi5, vi4);
const v128_t vmax018 = wasm_f32x4_pmax(vi8, vmax01);
const v128_t vmax67 = wasm_f32x4_pmax(vi7, vi6);
const v128_t vmax2345 = wasm_f32x4_pmax(vmax45, vmax23);
const v128_t vmax01678 = wasm_f32x4_pmax(vmax67, vmax018);
const v128_t vmax = wasm_f32x4_pmax(vmax2345, vmax01678);
v128_t vout = wasm_f32x4_pmax(voutput_min, vmax);
vout = wasm_f32x4_pmin(voutput_max, vout);
if (c & 2) {
wasm_v128_store64_lane(o, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
o += 2;
}
if (c & 1) {
wasm_v128_store32_lane(o, vout, 0);
o += 1;
}
}
}
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vo = wasm_v128_load(o);
const v128_t vmax01 = wasm_f32x4_pmax(vi1, vi0);
const v128_t vmax23 = wasm_f32x4_pmax(vi3, vi2);
const v128_t vmax45 = wasm_f32x4_pmax(vi5, vi4);
const v128_t vmax01o = wasm_f32x4_pmax(vo, vmax01);
const v128_t vmax67 = wasm_f32x4_pmax(vi7, vi6);
const v128_t vmax2345 = wasm_f32x4_pmax(vmax45, vmax23);
const v128_t vmax0167 = wasm_f32x4_pmax(vmax67, vmax01o);
const v128_t vmax = wasm_f32x4_pmax(vmax2345, vmax0167);
v128_t vout = wasm_f32x4_pmax(voutput_min, vmax);
vout = wasm_f32x4_pmin(voutput_max, vout);
wasm_v128_store(o, vout);
o += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vo = wasm_v128_load(o);
const v128_t vmax01 = wasm_f32x4_pmax(vi1, vi0);
const v128_t vmax23 = wasm_f32x4_pmax(vi3, vi2);
const v128_t vmax45 = wasm_f32x4_pmax(vi5, vi4);
const v128_t vmax01o = wasm_f32x4_pmax(vo, vmax01);
const v128_t vmax67 = wasm_f32x4_pmax(vi7, vi6);
const v128_t vmax2345 = wasm_f32x4_pmax(vmax45, vmax23);
const v128_t vmax0167 = wasm_f32x4_pmax(vmax67, vmax01o);
const v128_t vmax = wasm_f32x4_pmax(vmax2345, vmax0167);
v128_t vout = wasm_f32x4_pmax(voutput_min, vmax);
vout = wasm_f32x4_pmin(voutput_max, vout);
if (c & 2) {
wasm_v128_store64_lane(o, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
o += 2;
}
if (c & 1) {
wasm_v128_store32_lane(o, vout, 0);
o += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 8,799 | 31.234432 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-neon-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/pavgpool.h>
void xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
do {
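    // First pass: sum the first 9 pooling rows into the scratch buffer, 4 channels per iteration.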
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
vst1q_f32(b, vsum); b += 4;
}
}
size_t k = kernel_elements;
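    // Intermediate passes: accumulate 8 more rows into the buffer on each pass.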
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
vst1q_f32(b, vsum); b += 4;
}
}
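    // Final pass: add the last 1-8 rows (absent rows read from the zero buffer), scale by the per-pixel multiplier, and clamp.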
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float32x4_t vmultiplier = vld1q_dup_f32(multiplier); multiplier += 1;
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vacc = vld1q_f32(b); b += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
float32x4_t vout = vmulq_f32(vsum, vmultiplier);
vout = vmaxq_f32(vout, voutput_min);
vout = vminq_f32(vout, voutput_max);
vst1q_f32(output, vout); output += 4;
c -= 4;
}
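      // Process the remaining 1-3 channels with partial stores.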
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
float32x4_t vout = vmulq_f32(vsum, vmultiplier);
vout = vmaxq_f32(vout, voutput_min);
vout = vminq_f32(vout, voutput_max);
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(output, vout_lo, 0); output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,638 | 33.542208 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/pavgpool.h>
#include <xnnpack/math.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
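    // First pass: accumulate the first 9 pooling rows into the buffer, one channel per iteration.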
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
*b++ = vsum;
} while (--c != 0);
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
*b++ = vsum;
} while (--c != 0);
}
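    // Final pass: add the last 1-8 rows, apply the per-pixel multiplier, and clamp.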
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float vmultiplier = *multiplier++;
size_t c = channels;
float* b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
float vout = vsum * vmultiplier;
vout = math_max_f32(vout, voutput_min);
vout = math_min_f32(vout, voutput_max);
*output++ = vout;
} while (--c != 0);
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 8,154 | 28.762774 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-sse-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
do {
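    // First pass: sum the first 9 rows of the pooling window into the buffer.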
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum018 = _mm_add_ps(vsum01, vi8);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
_mm_store_ps(b, vsum); b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vacc = _mm_load_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
_mm_store_ps(b, vsum); b += 4;
}
}
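    // Final pass: fold in the last 1-8 rows, multiply by the per-pixel scale, and clamp.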
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const __m128 vmultiplier = _mm_load1_ps(multiplier);
multiplier += 1;
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vacc = _mm_load_ps(b);
b += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
__m128 vout = _mm_mul_ps(vsum, vmultiplier);
vout = _mm_max_ps(vout, voutput_min);
vout = _mm_min_ps(vout, voutput_max);
_mm_storeu_ps(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vacc = _mm_load_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
__m128 vout = _mm_mul_ps(vsum, vmultiplier);
vout = _mm_max_ps(vout, voutput_min);
vout = _mm_min_ps(vout, voutput_max);
if (c & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vout);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,612 | 30.492582 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-wasm-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/pavgpool.h>
#include <xnnpack/math.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__wasm_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
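    // First pass: accumulate the first 9 rows into the buffer; subsequent passes add 8 rows each.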
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
*b++ = vsum;
} while (--c != 0);
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
*b++ = vsum;
} while (--c != 0);
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float vmultiplier = *multiplier++;
size_t c = channels;
float* b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
float vout = vsum * vmultiplier;
vout = __builtin_wasm_max_f32(vout, voutput_min);
vout = __builtin_wasm_min_f32(vout, voutput_max);
*output++ = vout;
} while (--c != 0);
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 8,172 | 28.828467 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-wasmsimd-arm-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
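    // First pass: sum the first 9 pooling rows into the buffer, 4 channels per vector.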
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
wasm_v128_store(b, vsum);
b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
wasm_v128_store(b, vsum);
b += 4;
}
}
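    // Final pass: add the remaining rows, scale by the per-pixel multiplier, and clamp.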
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const v128_t vmultiplier = wasm_v128_load32_splat(multiplier);
multiplier += 1;
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,902 | 31.162242 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9p8x-minmax-wasmsimd-x86-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
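    // First pass over the first 9 rows; running sums are kept in the buffer.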
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
wasm_v128_store(b, vsum);
b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
wasm_v128_store(b, vsum);
b += 4;
}
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const v128_t vmultiplier = wasm_v128_load32_splat(multiplier);
multiplier += 1;
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,906 | 31.174041 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-neon-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9x__neon_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float32x4_t vmultiplier = vld1q_dup_f32(multiplier); multiplier += 1;
size_t c = channels;
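    // Single pass: sum up to 9 rows, scale by the per-pixel multiplier, and clamp, 4 channels at a time.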
while (c >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
float32x4_t vout = vmulq_f32(vsum, vmultiplier);
vout = vmaxq_f32(vout, voutput_min);
vout = vminq_f32(vout, voutput_max);
vst1q_f32(output, vout); output += 4;
c -= 4;
}
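    // Remaining 1-3 channels.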
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vi8 = vld1q_f32(i8);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
float32x4_t vout = vmulq_f32(vsum, vmultiplier);
vout = vmaxq_f32(vout, voutput_min);
vout = vminq_f32(vout, voutput_max);
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(output, vout_lo, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,513 | 30.872832 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/pavgpool.h>
#include <xnnpack/math.h>

void xnn_f32_pavgpool_minmax_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float vmultiplier = *multiplier++;
size_t c = channels;
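    // Single pass: sum up to 9 rows per channel, apply the multiplier, and clamp.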
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
float vout = vsum * vmultiplier;
vout = math_max_f32(vout, voutput_min);
vout = math_min_f32(vout, voutput_max);
*output++ = vout;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,770 | 26.525547 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-sse-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/pavgpool.h>

void xnn_f32_pavgpool_minmax_ukernel_9x__sse_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const __m128 vmultiplier = _mm_load1_ps(multiplier);
multiplier += 1;
size_t c = channels;
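    // Main loop: 4 channels per iteration using 128-bit SSE registers.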
while (c >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vsum018 = _mm_add_ps(_mm_add_ps(vi0, vi1), vi8);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
__m128 vout = _mm_mul_ps(vsum, vmultiplier);
vout = _mm_max_ps(vout, voutput_min);
vout = _mm_min_ps(vout, voutput_max);
_mm_storeu_ps(output, vout); output += 4;
c -= 4;
}
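    // 1-3 remaining channels: reuse full 4-wide loads (the kernel is annotated XNN_OOB_READS,
    // so reading past the last channel is allowed) and store only the valid lanes.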
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vi8 = _mm_loadu_ps(i8);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum018 = _mm_add_ps(vsum01, vi8);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
__m128 vout = _mm_mul_ps(vsum, vmultiplier);
vout = _mm_max_ps(vout, voutput_min);
vout = _mm_min_ps(vout, voutput_max);
if (c & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vout);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,383 | 28.26087 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-wasm-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/pavgpool.h>
#include <xnnpack/math.h>


void xnn_f32_pavgpool_minmax_ukernel_9x__wasm_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float vmultiplier = *multiplier++;
size_t c = channels;
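    // Scalar per-channel loop; __builtin_wasm_max_f32/min_f32 map to the Wasm f32.max/f32.min instructions.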
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
float vout = vsum * vmultiplier;
vout = __builtin_wasm_max_f32(vout, voutput_min);
vout = __builtin_wasm_min_f32(vout, voutput_max);
*output++ = vout;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,788 | 26.656934 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-wasmsimd-arm-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/pavgpool.h>


void xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_arm_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const v128_t vmultiplier = wasm_v128_load32_splat(multiplier);
multiplier += 1;
size_t c = channels;
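    // 4 channels per iteration; this ARM-tuned variant clamps with wasm_f32x4_max/min,
    // the fully IEEE-compliant (NaN-propagating) lane-wise min/max.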
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum018 = wasm_f32x4_add(wasm_f32x4_add(vi0, vi1), vi8);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vi8 = wasm_v128_load(i8);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,554 | 29.027027 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-pavgpool/f32-pavgpool-9x-minmax-wasmsimd-x86-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/pavgpool.h>


void xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_x86_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
const float* multiplier,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const v128_t vmultiplier = wasm_v128_load32_splat(multiplier);
multiplier += 1;
size_t c = channels;
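    // Same reduction as the ARM variant, but clamping uses wasm_f32x4_pmin/pmax
    // (pseudo-min/max), which map more directly to the x86 minps/maxps instructions.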
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum018 = wasm_f32x4_add(wasm_f32x4_add(vi0, vi1), vi8);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vi8 = wasm_v128_load(i8);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,558 | 29.048649 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-2x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/ppmm.h>


void xnn_f32_ppmm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
float vacc0x0 = w[0];
float vacc0x1 = w[1];
float vacc0x2 = w[2];
float vacc0x3 = w[3];
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc1x3 = vacc0x3;
w += 4;
size_t k = kc;
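    // A is pre-packed 2 rows at a time: each K step reads 2 consecutive A values
    // (one per output row) and a 4-wide row of packed weights.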
do {
const float va0 = a[0];
const float va1 = a[1];
a += 2;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc0x0 += va0 * vb0;
vacc1x0 += va1 * vb0;
vacc0x1 += va0 * vb1;
vacc1x1 += va1 * vb1;
vacc0x2 += va0 * vb2;
vacc1x2 += va1 * vb2;
vacc0x3 += va0 * vb3;
vacc1x3 += va1 * vb3;
k -= sizeof(float);
} while (k != 0);
const float vmax = params->scalar.max;
vacc0x0 = math_min_f32(vacc0x0, vmax);
vacc1x0 = math_min_f32(vacc1x0, vmax);
vacc0x1 = math_min_f32(vacc0x1, vmax);
vacc1x1 = math_min_f32(vacc1x1, vmax);
vacc0x2 = math_min_f32(vacc0x2, vmax);
vacc1x2 = math_min_f32(vacc1x2, vmax);
vacc0x3 = math_min_f32(vacc0x3, vmax);
vacc1x3 = math_min_f32(vacc1x3, vmax);
const float vmin = params->scalar.min;
vacc0x0 = math_max_f32(vacc0x0, vmin);
vacc1x0 = math_max_f32(vacc1x0, vmin);
vacc0x1 = math_max_f32(vacc0x1, vmin);
vacc1x1 = math_max_f32(vacc1x1, vmin);
vacc0x2 = math_max_f32(vacc0x2, vmin);
vacc1x2 = math_max_f32(vacc1x2, vmin);
vacc0x3 = math_max_f32(vacc0x3, vmin);
vacc1x3 = math_max_f32(vacc1x3, vmin);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c1[2] = vacc1x2;
c1[3] = vacc1x3;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
c0[2] = vacc0x2;
c0[3] = vacc0x3;
a = (const float*) ((uintptr_t) a - kc * 2);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
vacc1x0 = vacc1x2;
vacc0x0 = vacc0x2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c1 = vacc1x0;
*c0 = vacc0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,117 | 22.621212 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-3x3-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/ppmm.h>


void xnn_f32_ppmm_minmax_ukernel_3x3__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
float vacc0x0 = w[0];
float vacc0x1 = w[1];
float vacc0x2 = w[2];
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc2x2 = vacc0x2;
w += 3;
size_t k = kc;
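    // Each K step consumes 3 packed A values and 3 packed B values,
    // updating the full 3x3 accumulator tile.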
do {
const float va0 = a[0];
const float va1 = a[1];
const float va2 = a[2];
a += 3;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
w += 3;
vacc0x0 += va0 * vb0;
vacc1x0 += va1 * vb0;
vacc2x0 += va2 * vb0;
vacc0x1 += va0 * vb1;
vacc1x1 += va1 * vb1;
vacc2x1 += va2 * vb1;
vacc0x2 += va0 * vb2;
vacc1x2 += va1 * vb2;
vacc2x2 += va2 * vb2;
k -= sizeof(float);
} while (k != 0);
const float vmax = params->scalar.max;
vacc0x0 = math_min_f32(vacc0x0, vmax);
vacc1x0 = math_min_f32(vacc1x0, vmax);
vacc2x0 = math_min_f32(vacc2x0, vmax);
vacc0x1 = math_min_f32(vacc0x1, vmax);
vacc1x1 = math_min_f32(vacc1x1, vmax);
vacc2x1 = math_min_f32(vacc2x1, vmax);
vacc0x2 = math_min_f32(vacc0x2, vmax);
vacc1x2 = math_min_f32(vacc1x2, vmax);
vacc2x2 = math_min_f32(vacc2x2, vmax);
const float vmin = params->scalar.min;
vacc0x0 = math_max_f32(vacc0x0, vmin);
vacc1x0 = math_max_f32(vacc1x0, vmin);
vacc2x0 = math_max_f32(vacc2x0, vmin);
vacc0x1 = math_max_f32(vacc0x1, vmin);
vacc1x1 = math_max_f32(vacc1x1, vmin);
vacc2x1 = math_max_f32(vacc2x1, vmin);
vacc0x2 = math_max_f32(vacc0x2, vmin);
vacc1x2 = math_max_f32(vacc1x2, vmin);
vacc2x2 = math_max_f32(vacc2x2, vmin);
if XNN_LIKELY(nc >= 3) {
c2[0] = vacc2x0;
c2[1] = vacc2x1;
c2[2] = vacc2x2;
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c1[2] = vacc1x2;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
c0[2] = vacc0x2;
a = (const float*) ((uintptr_t) a - kc * 3);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 3;
} else {
if (nc & 2) {
c2[0] = vacc2x0;
c2[1] = vacc2x1;
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c2 = vacc2x0;
*c1 = vacc1x0;
*c0 = vacc0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,475 | 23.138889 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x16-minmax-aarch64-neonfma-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>


void xnn_f32_ppmm_minmax_ukernel_4x16__aarch64_neonfma_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
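  // Accumulators for the 4x16 output tile start from the packed bias;
  // rows 1-3 simply copy row 0 before the K loop runs.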
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
vacc0x89AB = vfmaq_laneq_f32(vacc0x89AB, vb89AB, va0123, 0);
vacc1x89AB = vfmaq_laneq_f32(vacc1x89AB, vb89AB, va0123, 1);
vacc2x89AB = vfmaq_laneq_f32(vacc2x89AB, vb89AB, va0123, 2);
vacc3x89AB = vfmaq_laneq_f32(vacc3x89AB, vb89AB, va0123, 3);
vacc0xCDEF = vfmaq_laneq_f32(vacc0xCDEF, vbCDEF, va0123, 0);
vacc1xCDEF = vfmaq_laneq_f32(vacc1xCDEF, vbCDEF, va0123, 1);
vacc2xCDEF = vfmaq_laneq_f32(vacc2xCDEF, vbCDEF, va0123, 2);
vacc3xCDEF = vfmaq_laneq_f32(vacc3xCDEF, vbCDEF, va0123, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0000, vb89AB);
vacc1x89AB = vfmaq_f32(vacc1x89AB, va1111, vb89AB);
vacc2x89AB = vfmaq_f32(vacc2x89AB, va2222, vb89AB);
vacc3x89AB = vfmaq_f32(vacc3x89AB, va3333, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0000, vbCDEF);
vacc1xCDEF = vfmaq_f32(vacc1xCDEF, va1111, vbCDEF);
vacc2xCDEF = vfmaq_f32(vacc2xCDEF, va2222, vbCDEF);
vacc3xCDEF = vfmaq_f32(vacc3xCDEF, va3333, vbCDEF);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,764 | 36.270992 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x16-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>


void xnn_f32_ppmm_minmax_ukernel_4x16__aarch64_neonfma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
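    // Each K step loads 4 packed A values and a 16-wide slice of B; on AArch64 the
    // lane-indexed FMAs below use the A vector directly, while the fallback path
    // first broadcasts each lane.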
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
vacc0x89AB = vfmaq_laneq_f32(vacc0x89AB, vb89AB, va0123, 0);
vacc1x89AB = vfmaq_laneq_f32(vacc1x89AB, vb89AB, va0123, 1);
vacc2x89AB = vfmaq_laneq_f32(vacc2x89AB, vb89AB, va0123, 2);
vacc3x89AB = vfmaq_laneq_f32(vacc3x89AB, vb89AB, va0123, 3);
vacc0xCDEF = vfmaq_laneq_f32(vacc0xCDEF, vbCDEF, va0123, 0);
vacc1xCDEF = vfmaq_laneq_f32(vacc1xCDEF, vbCDEF, va0123, 1);
vacc2xCDEF = vfmaq_laneq_f32(vacc2xCDEF, vbCDEF, va0123, 2);
vacc3xCDEF = vfmaq_laneq_f32(vacc3xCDEF, vbCDEF, va0123, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0000, vb89AB);
vacc1x89AB = vfmaq_f32(vacc1x89AB, va1111, vb89AB);
vacc2x89AB = vfmaq_f32(vacc2x89AB, va2222, vb89AB);
vacc3x89AB = vfmaq_f32(vacc3x89AB, va3333, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0000, vbCDEF);
vacc1xCDEF = vfmaq_f32(vacc1xCDEF, va1111, vbCDEF);
vacc2xCDEF = vfmaq_f32(vacc2xCDEF, va2222, vbCDEF);
vacc3xCDEF = vfmaq_f32(vacc3xCDEF, va3333, vbCDEF);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,438 | 36.015686 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x16-minmax-neon-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>


void xnn_f32_ppmm_minmax_ukernel_4x16__neon_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
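    // ARMv7 NEON path: vmlaq_lane_f32 is a non-fused multiply-accumulate,
    // selecting the packed A lanes directly instead of the FMA used by the neonfma variants.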
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
vacc0x89AB = vmlaq_lane_f32(vacc0x89AB, vb89AB, vget_low_f32(va0123), 0);
vacc1x89AB = vmlaq_lane_f32(vacc1x89AB, vb89AB, vget_low_f32(va0123), 1);
vacc2x89AB = vmlaq_lane_f32(vacc2x89AB, vb89AB, vget_high_f32(va0123), 0);
vacc3x89AB = vmlaq_lane_f32(vacc3x89AB, vb89AB, vget_high_f32(va0123), 1);
vacc0xCDEF = vmlaq_lane_f32(vacc0xCDEF, vbCDEF, vget_low_f32(va0123), 0);
vacc1xCDEF = vmlaq_lane_f32(vacc1xCDEF, vbCDEF, vget_low_f32(va0123), 1);
vacc2xCDEF = vmlaq_lane_f32(vacc2xCDEF, vbCDEF, vget_high_f32(va0123), 0);
vacc3xCDEF = vmlaq_lane_f32(vacc3xCDEF, vbCDEF, vget_high_f32(va0123), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,565 | 35.14346 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x16-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>


void xnn_f32_ppmm_minmax_ukernel_4x16__neon(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
vacc0x89AB = vmlaq_lane_f32(vacc0x89AB, vb89AB, vget_low_f32(va0123), 0);
vacc1x89AB = vmlaq_lane_f32(vacc1x89AB, vb89AB, vget_low_f32(va0123), 1);
vacc2x89AB = vmlaq_lane_f32(vacc2x89AB, vb89AB, vget_high_f32(va0123), 0);
vacc3x89AB = vmlaq_lane_f32(vacc3x89AB, vb89AB, vget_high_f32(va0123), 1);
vacc0xCDEF = vmlaq_lane_f32(vacc0xCDEF, vbCDEF, vget_low_f32(va0123), 0);
vacc1xCDEF = vmlaq_lane_f32(vacc1xCDEF, vbCDEF, vget_low_f32(va0123), 1);
vacc2xCDEF = vmlaq_lane_f32(vacc2xCDEF, vbCDEF, vget_high_f32(va0123), 0);
vacc3xCDEF = vmlaq_lane_f32(vacc3xCDEF, vbCDEF, vget_high_f32(va0123), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
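    // Store a full 16-column tile when nc >= 16; otherwise fall through to the
    // 8/4/2/1-column tail, shifting accumulators down as columns are written.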
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,294 | 34.909091 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x2-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/ppmm.h>


void xnn_f32_ppmm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float vacc0x0 = w[0];
float vacc0x1 = w[1];
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc3x0 = vacc0x0;
float vacc3x1 = vacc0x1;
w += 2;
size_t k = kc;
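    // Each K step reads 4 packed A values (one per output row) and 2 packed weights (one per column).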
do {
const float va0 = a[0];
const float va1 = a[1];
const float va2 = a[2];
const float va3 = a[3];
a += 4;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc0x0 += va0 * vb0;
vacc1x0 += va1 * vb0;
vacc2x0 += va2 * vb0;
vacc3x0 += va3 * vb0;
vacc0x1 += va0 * vb1;
vacc1x1 += va1 * vb1;
vacc2x1 += va2 * vb1;
vacc3x1 += va3 * vb1;
k -= sizeof(float);
} while (k != 0);
const float vmax = params->scalar.max;
vacc0x0 = math_min_f32(vacc0x0, vmax);
vacc1x0 = math_min_f32(vacc1x0, vmax);
vacc2x0 = math_min_f32(vacc2x0, vmax);
vacc3x0 = math_min_f32(vacc3x0, vmax);
vacc0x1 = math_min_f32(vacc0x1, vmax);
vacc1x1 = math_min_f32(vacc1x1, vmax);
vacc2x1 = math_min_f32(vacc2x1, vmax);
vacc3x1 = math_min_f32(vacc3x1, vmax);
const float vmin = params->scalar.min;
vacc0x0 = math_max_f32(vacc0x0, vmin);
vacc1x0 = math_max_f32(vacc1x0, vmin);
vacc2x0 = math_max_f32(vacc2x0, vmin);
vacc3x0 = math_max_f32(vacc3x0, vmin);
vacc0x1 = math_max_f32(vacc0x1, vmin);
vacc1x1 = math_max_f32(vacc1x1, vmin);
vacc2x1 = math_max_f32(vacc2x1, vmin);
vacc3x1 = math_max_f32(vacc3x1, vmin);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc3x0;
c3[1] = vacc3x1;
c2[0] = vacc2x0;
c2[1] = vacc2x1;
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
a = (const float*) ((uintptr_t) a - kc * 4);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
*c3 = vacc3x0;
*c2 = vacc2x0;
*c1 = vacc1x0;
*c0 = vacc0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,258 | 23.689394 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float vacc0x0 = w[0];
float vacc0x1 = w[1];
float vacc0x2 = w[2];
float vacc0x3 = w[3];
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc1x3 = vacc0x3;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc2x2 = vacc0x2;
float vacc2x3 = vacc0x3;
float vacc3x0 = vacc0x0;
float vacc3x1 = vacc0x1;
float vacc3x2 = vacc0x2;
float vacc3x3 = vacc0x3;
w += 4;
size_t k = kc;
do {
const float va0 = a[0];
const float va1 = a[1];
const float va2 = a[2];
const float va3 = a[3];
a += 4;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc0x0 += va0 * vb0;
vacc1x0 += va1 * vb0;
vacc2x0 += va2 * vb0;
vacc3x0 += va3 * vb0;
vacc0x1 += va0 * vb1;
vacc1x1 += va1 * vb1;
vacc2x1 += va2 * vb1;
vacc3x1 += va3 * vb1;
vacc0x2 += va0 * vb2;
vacc1x2 += va1 * vb2;
vacc2x2 += va2 * vb2;
vacc3x2 += va3 * vb2;
vacc0x3 += va0 * vb3;
vacc1x3 += va1 * vb3;
vacc2x3 += va2 * vb3;
vacc3x3 += va3 * vb3;
k -= sizeof(float);
} while (k != 0);
const float vmax = params->scalar.max;
vacc0x0 = math_min_f32(vacc0x0, vmax);
vacc1x0 = math_min_f32(vacc1x0, vmax);
vacc2x0 = math_min_f32(vacc2x0, vmax);
vacc3x0 = math_min_f32(vacc3x0, vmax);
vacc0x1 = math_min_f32(vacc0x1, vmax);
vacc1x1 = math_min_f32(vacc1x1, vmax);
vacc2x1 = math_min_f32(vacc2x1, vmax);
vacc3x1 = math_min_f32(vacc3x1, vmax);
vacc0x2 = math_min_f32(vacc0x2, vmax);
vacc1x2 = math_min_f32(vacc1x2, vmax);
vacc2x2 = math_min_f32(vacc2x2, vmax);
vacc3x2 = math_min_f32(vacc3x2, vmax);
vacc0x3 = math_min_f32(vacc0x3, vmax);
vacc1x3 = math_min_f32(vacc1x3, vmax);
vacc2x3 = math_min_f32(vacc2x3, vmax);
vacc3x3 = math_min_f32(vacc3x3, vmax);
const float vmin = params->scalar.min;
vacc0x0 = math_max_f32(vacc0x0, vmin);
vacc1x0 = math_max_f32(vacc1x0, vmin);
vacc2x0 = math_max_f32(vacc2x0, vmin);
vacc3x0 = math_max_f32(vacc3x0, vmin);
vacc0x1 = math_max_f32(vacc0x1, vmin);
vacc1x1 = math_max_f32(vacc1x1, vmin);
vacc2x1 = math_max_f32(vacc2x1, vmin);
vacc3x1 = math_max_f32(vacc3x1, vmin);
vacc0x2 = math_max_f32(vacc0x2, vmin);
vacc1x2 = math_max_f32(vacc1x2, vmin);
vacc2x2 = math_max_f32(vacc2x2, vmin);
vacc3x2 = math_max_f32(vacc3x2, vmin);
vacc0x3 = math_max_f32(vacc0x3, vmin);
vacc1x3 = math_max_f32(vacc1x3, vmin);
vacc2x3 = math_max_f32(vacc2x3, vmin);
vacc3x3 = math_max_f32(vacc3x3, vmin);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc3x0;
c3[1] = vacc3x1;
c3[2] = vacc3x2;
c3[3] = vacc3x3;
c2[0] = vacc2x0;
c2[1] = vacc2x1;
c2[2] = vacc2x2;
c2[3] = vacc2x3;
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c1[2] = vacc1x2;
c1[3] = vacc1x3;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
c0[2] = vacc0x2;
c0[3] = vacc0x3;
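      // Rewind the packed-A pointer to the start of the panel; the same
      // 4-row panel is reused for the next block of output columns.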
a = (const float*) ((uintptr_t) a - kc * 4);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc3x0;
c3[1] = vacc3x1;
c2[0] = vacc2x0;
c2[1] = vacc2x1;
c1[0] = vacc1x0;
c1[1] = vacc1x1;
c0[0] = vacc0x0;
c0[1] = vacc0x1;
vacc3x0 = vacc3x2;
vacc2x0 = vacc2x2;
vacc1x0 = vacc1x2;
vacc0x0 = vacc0x2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c3 = vacc3x0;
*c2 = vacc2x0;
*c1 = vacc1x0;
*c0 = vacc0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 5,046 | 25.015464 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-aarch64-neonfma-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__aarch64_neonfma_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
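  // Broadcast the scalar min/max clamping bounds into vectors once, outside
  // the main loop: vld2 de-interleaves the adjacent min and max values and
  // duplicates each across all lanes.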
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
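  // Warm the L1 cache with the first 256 bytes of packed weights; the inner
  // loop keeps prefetching roughly 192 bytes ahead of the current position.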
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,326 | 33.2 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__aarch64_neonfma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
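      // Each lane of the packed-A vector scales both B vectors: on AArch64 this
      // uses lane-indexed FMA (vfmaq_laneq_f32); elsewhere the lane is broadcast
      // first with vdupq_lane_f32 and combined with vfmaq_f32.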
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,000 | 32.713483 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-neon-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__neon_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 8;
} else {
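      // Fewer than 8 columns remain: store progressively smaller chunks of
      // 4, 2 and 1 floats, shifting the surviving lanes down after each store.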
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,515 | 31.833333 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__neon(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
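      // ARMv7-compatible path: vmlaq_lane_f32 multiply-accumulates one broadcast
      // lane of packed A per accumulator row, without relying on FMA support.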
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 4);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,244 | 31.376543 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const __m128 va0123 = _mm_load_ps(a);
a += 4;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
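      // SSE1 has no FMA and no lane-indexed multiply, so each packed-A lane is
      // broadcast with a shuffle and combined via separate multiply and add.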
const __m128 va0000 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1111 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2222 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3333 = _mm_shuffle_ps(va0123, va0123, _MM_SHUFFLE(3, 3, 3, 3));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0000, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1111, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2222, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3333, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0000, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1111, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2222, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3333, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
a = (const float*) ((uintptr_t) a - kc * 4);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,155 | 29.874251 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-wasmsimd-arm-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0123 = wasm_v128_load(a);
a += 4;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0000 = wasm_v32x4_shuffle(va0123, va0123, 0, 0, 0, 0);
const v128_t va1111 = wasm_v32x4_shuffle(va0123, va0123, 1, 1, 1, 1);
const v128_t va2222 = wasm_v32x4_shuffle(va0123, va0123, 2, 2, 2, 2);
const v128_t va3333 = wasm_v32x4_shuffle(va0123, va0123, 3, 3, 3, 3);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0000, vb0123));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1111, vb0123));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2222, vb0123));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3333, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0000, vb4567));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1111, vb4567));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2222, vb4567));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3333, vb4567));
k -= sizeof(float);
} while (k != 0);
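    // Clamp with wasm_f32x4_min/max, whose NaN and signed-zero handling matches
    // the single-instruction fmin/fmax available on ARM targets.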
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
a = (const float*) ((uintptr_t) a - kc * 4);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,451 | 31.646707 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-4x8-minmax-wasmsimd-x86-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0123 = wasm_v128_load(a);
a += 4;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0000 = wasm_v32x4_shuffle(va0123, va0123, 0, 0, 0, 0);
const v128_t va1111 = wasm_v32x4_shuffle(va0123, va0123, 1, 1, 1, 1);
const v128_t va2222 = wasm_v32x4_shuffle(va0123, va0123, 2, 2, 2, 2);
const v128_t va3333 = wasm_v32x4_shuffle(va0123, va0123, 3, 3, 3, 3);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0000, vb0123));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1111, vb0123));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2222, vb0123));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3333, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0000, vb4567));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1111, vb4567));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2222, vb4567));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3333, vb4567));
k -= sizeof(float);
} while (k != 0);
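    // The x86-tuned variant clamps with pseudo-min/max (pmin/pmax picks b when
    // b < a), which engines can lower to a single minps/maxps on x86.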
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
a = (const float*) ((uintptr_t) a - kc * 4);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,467 | 31.742515 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-aarch64-neonfma-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>
void xnn_f32_ppmm_minmax_ukernel_8x8__aarch64_neonfma_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
float32x4_t vacc6x0123 = vacc0x0123;
float32x4_t vacc6x4567 = vacc0x4567;
float32x4_t vacc7x0123 = vacc0x0123;
float32x4_t vacc7x4567 = vacc0x4567;
size_t k = kc;
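    // Each k step consumes two vectors of packed A (eight rows) and two vectors
    // of packed B (eight columns), updating all sixteen accumulators.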
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t va4567 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
vacc4x0123 = vfmaq_laneq_f32(vacc4x0123, vb0123, va4567, 0);
vacc5x0123 = vfmaq_laneq_f32(vacc5x0123, vb0123, va4567, 1);
vacc6x0123 = vfmaq_laneq_f32(vacc6x0123, vb0123, va4567, 2);
vacc7x0123 = vfmaq_laneq_f32(vacc7x0123, vb0123, va4567, 3);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
vacc4x4567 = vfmaq_laneq_f32(vacc4x4567, vb4567, va4567, 0);
vacc5x4567 = vfmaq_laneq_f32(vacc5x4567, vb4567, va4567, 1);
vacc6x4567 = vfmaq_laneq_f32(vacc6x4567, vb4567, va4567, 2);
vacc7x4567 = vfmaq_laneq_f32(vacc7x4567, vb4567, va4567, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
const float32x4_t va4444 = vdupq_lane_f32(vget_low_f32(va4567), 0);
const float32x4_t va5555 = vdupq_lane_f32(vget_low_f32(va4567), 1);
const float32x4_t va6666 = vdupq_lane_f32(vget_high_f32(va4567), 0);
const float32x4_t va7777 = vdupq_lane_f32(vget_high_f32(va4567), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4444, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5555, vb0123);
vacc6x0123 = vfmaq_f32(vacc6x0123, va6666, vb0123);
vacc7x0123 = vfmaq_f32(vacc7x0123, va7777, vb0123);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4444, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5555, vb4567);
vacc6x4567 = vfmaq_f32(vacc6x4567, va6666, vb4567);
vacc7x4567 = vfmaq_f32(vacc7x4567, va7777, vb4567);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc6x0123 = vminq_f32(vacc6x0123, vmax);
vacc7x0123 = vminq_f32(vacc7x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
vacc6x4567 = vminq_f32(vacc6x4567, vmax);
vacc7x4567 = vminq_f32(vacc7x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c7, vacc7x0123);
vst1q_f32(c7 + 4, vacc7x4567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
vst1q_f32(c6, vacc6x0123);
vst1q_f32(c6 + 4, vacc6x4567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 8);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c7, vacc7x0123); c7 += 4;
vst1q_f32(c6, vacc6x0123); c6 += 4;
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc7x0123 = vacc7x4567;
vacc6x0123 = vacc6x4567;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c7, vacc7x01); c7 += 2;
vst1_f32(c6, vacc6x01); c6 += 2;
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc7x01 = vget_high_f32(vacc7x0123);
vacc6x01 = vget_high_f32(vacc6x0123);
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c7, vacc7x01, 0);
vst1_lane_f32(c6, vacc6x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,713 | 36.992908 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_8x8__aarch64_neonfma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
do {
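    // The packed weights start with the bias for this block of eight output
    // columns; every row's accumulators are initialized from the same two vectors.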
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
float32x4_t vacc6x0123 = vacc0x0123;
float32x4_t vacc6x4567 = vacc0x4567;
float32x4_t vacc7x0123 = vacc0x0123;
float32x4_t vacc7x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t va4567 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
#if XNN_ARCH_ARM64
vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
vacc4x0123 = vfmaq_laneq_f32(vacc4x0123, vb0123, va4567, 0);
vacc5x0123 = vfmaq_laneq_f32(vacc5x0123, vb0123, va4567, 1);
vacc6x0123 = vfmaq_laneq_f32(vacc6x0123, vb0123, va4567, 2);
vacc7x0123 = vfmaq_laneq_f32(vacc7x0123, vb0123, va4567, 3);
vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
vacc4x4567 = vfmaq_laneq_f32(vacc4x4567, vb4567, va4567, 0);
vacc5x4567 = vfmaq_laneq_f32(vacc5x4567, vb4567, va4567, 1);
vacc6x4567 = vfmaq_laneq_f32(vacc6x4567, vb4567, va4567, 2);
vacc7x4567 = vfmaq_laneq_f32(vacc7x4567, vb4567, va4567, 3);
#else
const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
const float32x4_t va4444 = vdupq_lane_f32(vget_low_f32(va4567), 0);
const float32x4_t va5555 = vdupq_lane_f32(vget_low_f32(va4567), 1);
const float32x4_t va6666 = vdupq_lane_f32(vget_high_f32(va4567), 0);
const float32x4_t va7777 = vdupq_lane_f32(vget_high_f32(va4567), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4444, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5555, vb0123);
vacc6x0123 = vfmaq_f32(vacc6x0123, va6666, vb0123);
vacc7x0123 = vfmaq_f32(vacc7x0123, va7777, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4444, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5555, vb4567);
vacc6x4567 = vfmaq_f32(vacc6x4567, va6666, vb4567);
vacc7x4567 = vfmaq_f32(vacc7x4567, va7777, vb4567);
#endif
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc6x0123 = vminq_f32(vacc6x0123, vmax);
vacc7x0123 = vminq_f32(vacc7x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
vacc6x4567 = vminq_f32(vacc6x4567, vmax);
vacc7x4567 = vminq_f32(vacc7x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c7, vacc7x0123);
vst1q_f32(c7 + 4, vacc7x4567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
vst1q_f32(c6, vacc6x0123);
vst1q_f32(c6 + 4, vacc6x4567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 8);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c7, vacc7x0123); c7 += 4;
vst1q_f32(c6, vacc6x0123); c6 += 4;
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc7x0123 = vacc7x4567;
vacc6x0123 = vacc6x4567;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c7, vacc7x01); c7 += 2;
vst1_f32(c6, vacc6x01); c6 += 2;
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc7x01 = vget_high_f32(vacc7x0123);
vacc6x01 = vget_high_f32(vacc6x0123);
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c7, vacc7x01, 0);
vst1_lane_f32(c6, vacc6x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
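/*
 * Minimal scalar model of what this 8x8 PPMM micro-kernel accumulates into one
 * output tile: the weight stream carries an 8-float bias row followed by 8
 * packed B values per K element, and the packed A stream carries 8 A values
 * per K element. Hypothetical reference helper, for illustration only.
 */
static void ppmm_8x8_tile_reference(
    size_t kc,               /* K extent in bytes, multiple of sizeof(float) */
    const float* a_packed,   /* 8 floats per K element */
    const float* w_packed,   /* 8 bias floats, then 8 floats per K element */
    float acc[8][8])
{
  for (size_t m = 0; m < 8; m++) {
    for (size_t n = 0; n < 8; n++) {
      acc[m][n] = w_packed[n];  /* every row starts from the same bias row */
    }
  }
  const float* b = w_packed + 8;
  for (size_t k = 0; k < kc / sizeof(float); k++) {
    for (size_t m = 0; m < 8; m++) {
      for (size_t n = 0; n < 8; n++) {
        acc[m][n] += a_packed[k * 8 + m] * b[k * 8 + n];
      }
    }
  }
}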
| 10,387 | 36.774545 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-neon-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
#include <xnnpack/prefetch.h>
void xnn_f32_ppmm_minmax_ukernel_8x8__neon_prfm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
xnn_prefetch_to_l1((const int8_t*) w + 0);
xnn_prefetch_to_l1((const int8_t*) w + 64);
xnn_prefetch_to_l1((const int8_t*) w + 128);
xnn_prefetch_to_l1((const int8_t*) w + 192);
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
float32x4_t vacc6x0123 = vacc0x0123;
float32x4_t vacc6x4567 = vacc0x4567;
float32x4_t vacc7x0123 = vacc0x0123;
float32x4_t vacc7x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t va4567 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, vget_low_f32(va4567), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123, vget_low_f32(va4567), 1);
vacc6x0123 = vmlaq_lane_f32(vacc6x0123, vb0123, vget_high_f32(va4567), 0);
vacc7x0123 = vmlaq_lane_f32(vacc7x0123, vb0123, vget_high_f32(va4567), 1);
xnn_prefetch_to_l1((const int8_t*) w + 192);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, vget_low_f32(va4567), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567, vget_low_f32(va4567), 1);
vacc6x4567 = vmlaq_lane_f32(vacc6x4567, vb4567, vget_high_f32(va4567), 0);
vacc7x4567 = vmlaq_lane_f32(vacc7x4567, vb4567, vget_high_f32(va4567), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc6x0123 = vminq_f32(vacc6x0123, vmax);
vacc7x0123 = vminq_f32(vacc7x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
vacc6x4567 = vminq_f32(vacc6x4567, vmax);
vacc7x4567 = vminq_f32(vacc7x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c7, vacc7x0123);
vst1q_f32(c7 + 4, vacc7x4567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
vst1q_f32(c6, vacc6x0123);
vst1q_f32(c6 + 4, vacc6x4567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 8);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c7, vacc7x0123); c7 += 4;
vst1q_f32(c6, vacc6x0123); c6 += 4;
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc7x0123 = vacc7x4567;
vacc6x0123 = vacc6x4567;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c7, vacc7x01); c7 += 2;
vst1_f32(c6, vacc6x01); c6 += 2;
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc7x01 = vget_high_f32(vacc7x0123);
vacc6x01 = vget_high_f32(vacc6x0123);
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c7, vacc7x01, 0);
vst1_lane_f32(c6, vacc6x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
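/*
 * The clamping bounds are read with vld2q_dup_f32(&params->scalar.min), which
 * assumes `min` and `max` are adjacent floats in the params struct. A
 * hypothetical layout and the scalar equivalent of the two-step clamp, for
 * illustration only.
 */
struct example_minmax_params {
  float min;
  float max;  /* must directly follow min for the interleaved dup-load */
};

static float clamp_like_kernel(float v, const struct example_minmax_params* p) {
  v = v < p->max ? v : p->max;  /* vminq_f32(vacc, vmax) */
  v = v > p->min ? v : p->min;  /* vmaxq_f32(vacc, vmin) */
  return v;
}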
| 9,208 | 35.399209 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ppmm/gen/f32-ppmm-8x8-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ppmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ppmm.h>
void xnn_f32_ppmm_minmax_ukernel_8x8__neon(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
float32x4_t vacc6x0123 = vacc0x0123;
float32x4_t vacc6x4567 = vacc0x4567;
float32x4_t vacc7x0123 = vacc0x0123;
float32x4_t vacc7x4567 = vacc0x4567;
size_t k = kc;
do {
const float32x4_t va0123 = vld1q_f32(a); a += 4;
const float32x4_t va4567 = vld1q_f32(a); a += 4;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, vget_low_f32(va0123), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, vget_low_f32(va0123), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, vget_high_f32(va0123), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, vget_high_f32(va0123), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, vget_low_f32(va4567), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123, vget_low_f32(va4567), 1);
vacc6x0123 = vmlaq_lane_f32(vacc6x0123, vb0123, vget_high_f32(va4567), 0);
vacc7x0123 = vmlaq_lane_f32(vacc7x0123, vb0123, vget_high_f32(va4567), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, vget_low_f32(va0123), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, vget_low_f32(va0123), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, vget_high_f32(va0123), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, vget_high_f32(va0123), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, vget_low_f32(va4567), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567, vget_low_f32(va4567), 1);
vacc6x4567 = vmlaq_lane_f32(vacc6x4567, vb4567, vget_high_f32(va4567), 0);
vacc7x4567 = vmlaq_lane_f32(vacc7x4567, vb4567, vget_high_f32(va4567), 1);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc6x0123 = vminq_f32(vacc6x0123, vmax);
vacc7x0123 = vminq_f32(vacc7x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
vacc6x4567 = vminq_f32(vacc6x4567, vmax);
vacc7x4567 = vminq_f32(vacc7x4567, vmax);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c7, vacc7x0123);
vst1q_f32(c7 + 4, vacc7x4567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
vst1q_f32(c6, vacc6x0123);
vst1q_f32(c6 + 4, vacc6x4567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float*) ((uintptr_t) a - kc * 8);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c7, vacc7x0123); c7 += 4;
vst1q_f32(c6, vacc6x0123); c6 += 4;
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc7x0123 = vacc7x4567;
vacc6x0123 = vacc6x4567;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c7, vacc7x01); c7 += 2;
vst1_f32(c6, vacc6x01); c6 += 2;
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc7x01 = vget_high_f32(vacc7x0123);
vacc6x01 = vget_high_f32(vacc6x0123);
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c7, vacc7x01, 0);
vst1_lane_f32(c6, vacc6x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
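/*
 * Scalar model of the column-remainder store: when fewer than 8 output columns
 * remain, the kernel spills 4, then 2, then 1 surviving lanes, moving the
 * upper half of the accumulator down after each partial store. Hypothetical
 * helper covering a single output row.
 */
static void store_row_tail_example(float* c, const float acc[8], size_t nc) {
  size_t pos = 0;
  if (nc & 4) {
    for (size_t i = 0; i < 4; i++) c[pos + i] = acc[pos + i];
    pos += 4;
  }
  if (nc & 2) {
    for (size_t i = 0; i < 2; i++) c[pos + i] = acc[pos + i];
    pos += 2;
  }
  if (nc & 1) {
    c[pos] = acc[pos];
  }
}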
| 8,937 | 35.186235 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-avx-2x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
// Seven -1 words followed by seven 0 words: an 8-word window into this table, offset by the channel remainder, yields the per-lane mask for maskload/maskstore.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_prelu_ukernel__avx_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const __m256 vw01234567 = _mm256_load_ps(w);
const __m256 vw89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vprod0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
const __m256 vprod0x89ABCDEF = _mm256_mul_ps(vi0x89ABCDEF, vw89ABCDEF);
const __m256 vprod1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
const __m256 vprod1x89ABCDEF = _mm256_mul_ps(vi1x89ABCDEF, vw89ABCDEF);
const __m256 vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vprod0x01234567, vi0x01234567);
const __m256 vacc0x89ABCDEF = _mm256_blendv_ps(vi0x89ABCDEF, vprod0x89ABCDEF, vi0x89ABCDEF);
const __m256 vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vprod1x01234567, vi1x01234567);
const __m256 vacc1x89ABCDEF = _mm256_blendv_ps(vi1x89ABCDEF, vprod1x89ABCDEF, vi1x89ABCDEF);
_mm256_storeu_ps(o0, vacc0x01234567);
_mm256_storeu_ps(o0 + 8, vacc0x89ABCDEF);
o0 += 16;
_mm256_storeu_ps(o1, vacc1x01234567);
_mm256_storeu_ps(o1 + 8, vacc1x89ABCDEF);
o1 += 16;
}
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const __m256 vw = _mm256_load_ps(w);
w += 8;
const __m256 vi0 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vi1 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vprod0 = _mm256_mul_ps(vi0, vw);
const __m256 vprod1 = _mm256_mul_ps(vi1, vw);
const __m256 vacc0 = _mm256_blendv_ps(vi0, vprod0, vi0);
const __m256 vacc1 = _mm256_blendv_ps(vi1, vprod1, vi1);
_mm256_storeu_ps(o0, vacc0);
o0 += 8;
_mm256_storeu_ps(o1, vacc1);
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1 * sizeof(float));
assert(c <= 7 * sizeof(float));
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - c));
const __m256 vw = _mm256_maskload_ps(w, vmask);
const __m256 vi0 = _mm256_maskload_ps(i0, vmask);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m256 vi1 = _mm256_maskload_ps(i1, vmask);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m256 vprod0 = _mm256_mul_ps(vi0, vw);
const __m256 vprod1 = _mm256_mul_ps(vi1, vw);
__m256 vacc0 = _mm256_blendv_ps(vi0, vprod0, vi0);
__m256 vacc1 = _mm256_blendv_ps(vi1, vprod1, vi1);
__m128 vacc0_lo = _mm256_castps256_ps128(vacc0);
__m128 vacc1_lo = _mm256_castps256_ps128(vacc1);
if (c & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vacc0_lo);
_mm_storeu_ps(o1, vacc1_lo);
vacc0_lo = _mm256_extractf128_ps(vacc0, 1);
vacc1_lo = _mm256_extractf128_ps(vacc1, 1);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0_lo);
_mm_storel_pi((__m64*) o1, vacc1_lo);
vacc0_lo = _mm_movehl_ps(vacc0_lo, vacc0_lo);
vacc1_lo = _mm_movehl_ps(vacc1_lo, vacc1_lo);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0_lo);
_mm_store_ss(o1, vacc1_lo);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
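/*
 * Scalar reference for the per-channel PReLU this kernel vectorizes, assuming
 * densely packed rows (stride == channels). The AVX code keys off the sign bit
 * via _mm256_blendv_ps, so a -0.0f input also takes the multiply path; the
 * conventional x < 0 formulation below is otherwise equivalent. Hypothetical
 * helper, for illustration only.
 */
static void prelu_reference(
    size_t rows, size_t channel_count,
    const float* input, const float* slopes, float* output)
{
  for (size_t r = 0; r < rows; r++) {
    for (size_t ch = 0; ch < channel_count; ch++) {
      const float x = input[r * channel_count + ch];
      output[r * channel_count + ch] = x < 0.0f ? x * slopes[ch] : x;
    }
  }
}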
| 4,942 | 31.30719 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-avx-2x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_prelu_ukernel__avx_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const __m256 vw01234567 = _mm256_load_ps(w);
w += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vprod0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
const __m256 vprod1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
const __m256 vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vprod0x01234567, vi0x01234567);
const __m256 vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vprod1x01234567, vi1x01234567);
_mm256_storeu_ps(o0, vacc0x01234567);
o0 += 8;
_mm256_storeu_ps(o1, vacc1x01234567);
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1 * sizeof(float));
assert(c <= 7 * sizeof(float));
__m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - c));
const __m256 vw = _mm256_maskload_ps(w, vmask);
const __m256 vi0 = _mm256_maskload_ps(i0, vmask);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m256 vi1 = _mm256_maskload_ps(i1, vmask);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m256 vprod0 = _mm256_mul_ps(vi0, vw);
const __m256 vprod1 = _mm256_mul_ps(vi1, vw);
__m256 vacc0 = _mm256_blendv_ps(vi0, vprod0, vi0);
__m256 vacc1 = _mm256_blendv_ps(vi1, vprod1, vi1);
__m128 vacc0_lo = _mm256_castps256_ps128(vacc0);
__m128 vacc1_lo = _mm256_castps256_ps128(vacc1);
if (c & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vacc0_lo);
_mm_storeu_ps(o1, vacc1_lo);
vacc0_lo = _mm256_extractf128_ps(vacc0, 1);
vacc1_lo = _mm256_extractf128_ps(vacc1, 1);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0_lo);
_mm_storel_pi((__m64*) o1, vacc1_lo);
vacc0_lo = _mm_movehl_ps(vacc0_lo, vacc0_lo);
vacc1_lo = _mm_movehl_ps(vacc1_lo, vacc1_lo);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0_lo);
_mm_store_ss(o1, vacc1_lo);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
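/*
 * Sketch of the remainder-mask trick used above: mask_table holds seven -1
 * words followed by seven 0 words, and loading 8 words starting `c` bytes
 * before element 7 leaves exactly c/sizeof(float) enabled lanes for
 * _mm256_maskload_ps. Equivalent scalar construction, assuming 1..7 remaining
 * floats; hypothetical helper, for illustration only.
 */
static void build_remainder_mask_example(size_t remaining_floats, int32_t mask[8]) {
  for (size_t i = 0; i < 8; i++) {
    mask[i] = i < remaining_floats ? -1 : 0;
  }
}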
| 3,731 | 29.096774 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-1x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_1x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vw89AB = vld1q_f32(w); w += 4;
const float32x4_t vwCDEF = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
float32x4_t vacc0x89AB = vmulq_f32(vi0x89AB, vw89AB);
const uint32x4_t vm0x89AB = vcltq_s32(vreinterpretq_s32_f32(vi0x89AB), vmovq_n_s32(0));
float32x4_t vacc0xCDEF = vmulq_f32(vi0xCDEF, vwCDEF);
const uint32x4_t vm0xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi0xCDEF), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vacc0x89AB = vbslq_f32(vm0x89AB, vacc0x89AB, vi0x89AB);
vacc0xCDEF = vbslq_f32(vm0xCDEF, vacc0xCDEF, vi0xCDEF);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
vst1q_f32(o0, vacc0x89AB); o0 += 4;
vst1q_f32(o0, vacc0xCDEF); o0 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
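/*
 * The negative-input test above reinterprets the float bits as a signed 32-bit
 * integer and compares against zero, i.e. it tests the IEEE-754 sign bit.
 * Scalar per-lane model of the same selection, using memcpy for the
 * reinterpretation; hypothetical helper, for illustration only.
 */
#include <string.h>

static float prelu_lane_example(float x, float w) {
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));   /* vreinterpretq_s32_f32 */
  if (bits < 0) {                    /* vcltq_s32(bits, 0): sign bit set */
    return x * w;                    /* lane selected by vbslq_f32 */
  }
  return x;
}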
| 3,805 | 33.6 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_1x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
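/*
 * Pointer bookkeeping sketch: `channels`, `input_stride` and `output_stride`
 * are byte counts, the inner loops advance the row pointers by exactly
 * `channels` bytes, and the precomputed increments then step to the start of
 * the next row. Hypothetical helper, assuming the row was fully consumed.
 */
static const float* next_row_example(
    const float* end_of_row, size_t stride_bytes, size_t channels_bytes)
{
  /* equivalent to adding the (stride - channels) increment precomputed above */
  return (const float*) ((uintptr_t) end_of_row + stride_bytes - channels_bytes);
}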
| 2,237 | 27.329114 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-1x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_1x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
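/*
 * Hypothetical call sketch for a densely packed row-major tensor: every stride
 * equals the channel count in bytes. Illustrative only; in practice this entry
 * point is reached through XNNPACK's operator layer rather than called
 * directly.
 */
static void run_prelu_example(
    size_t batch_size, size_t channel_count,
    const float* x, const float* slopes, float* y)
{
  xnn_f32_prelu_ukernel__neon_1x8(
      /*rows=*/batch_size,
      /*channels=*/channel_count * sizeof(float),
      x, /*input_stride=*/channel_count * sizeof(float),
      slopes,
      y, /*output_stride=*/channel_count * sizeof(float));
}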
| 3,058 | 30.214286 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-2x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vw89AB = vld1q_f32(w); w += 4;
const float32x4_t vwCDEF = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
float32x4_t vacc0x89AB = vmulq_f32(vi0x89AB, vw89AB);
const uint32x4_t vm0x89AB = vcltq_s32(vreinterpretq_s32_f32(vi0x89AB), vmovq_n_s32(0));
float32x4_t vacc0xCDEF = vmulq_f32(vi0xCDEF, vwCDEF);
const uint32x4_t vm0xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi0xCDEF), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc1x4567 = vmulq_f32(vi1x4567, vw4567);
const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0));
float32x4_t vacc1x89AB = vmulq_f32(vi1x89AB, vw89AB);
const uint32x4_t vm1x89AB = vcltq_s32(vreinterpretq_s32_f32(vi1x89AB), vmovq_n_s32(0));
float32x4_t vacc1xCDEF = vmulq_f32(vi1xCDEF, vwCDEF);
const uint32x4_t vm1xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi1xCDEF), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vacc0x89AB = vbslq_f32(vm0x89AB, vacc0x89AB, vi0x89AB);
vacc0xCDEF = vbslq_f32(vm0xCDEF, vacc0xCDEF, vi0xCDEF);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc1x4567 = vbslq_f32(vm1x4567, vacc1x4567, vi1x4567);
vacc1x89AB = vbslq_f32(vm1x89AB, vacc1x89AB, vi1x89AB);
vacc1xCDEF = vbslq_f32(vm1xCDEF, vacc1xCDEF, vi1xCDEF);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
vst1q_f32(o0, vacc0x89AB); o0 += 4;
vst1q_f32(o0, vacc0xCDEF); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o1, vacc1x4567); o1 += 4;
vst1q_f32(o1, vacc1x89AB); o1 += 4;
vst1q_f32(o1, vacc1xCDEF); o1 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
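/*
 * Row-remainder sketch: `doz` is a saturating ("difference or zero")
 * subtraction, and when fewer rows remain than the 2-row tile, the second
 * row's pointers are aliased onto the first so the duplicated work is
 * harmless. Scalar equivalent of doz, for illustration only.
 */
static size_t doz_example(size_t a, size_t b) {
  return a > b ? a - b : 0;
}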
| 6,214 | 39.620915 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-2x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
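/*
 * The channel tail above loads a full 4-float vector even when fewer floats
 * remain, which is what the XNN_OOB_READS annotation documents: input and
 * weight buffers need a little readable slack past their logical end. A
 * hypothetical padded-allocation sketch; the 16-byte pad size is an assumption
 * made here for illustration only.
 */
#include <stdlib.h>

static float* alloc_with_oob_slack(size_t n_floats) {
  return (float*) malloc(n_floats * sizeof(float) + 16);
}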
| 3,372 | 32.39604 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-2x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc1x4567 = vmulq_f32(vi1x4567, vw4567);
const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc1x4567 = vbslq_f32(vm1x4567, vacc1x4567, vi1x4567);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o1, vacc1x4567); o1 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
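/*
 * Loop-structure sketch: the slope array is re-walked from its start for every
 * row tile while the input/output pointers keep advancing. Hypothetical scalar
 * outline of the same traversal for one 2-row tile, with channel_count in
 * elements rather than bytes.
 */
static void prelu_two_rows_example(
    size_t channel_count, const float* w,
    const float* i0, const float* i1, float* o0, float* o1)
{
  for (size_t ch = 0; ch < channel_count; ch++) {
    o0[ch] = i0[ch] < 0.0f ? i0[ch] * w[ch] : i0[ch];
    o1[ch] = i1[ch] < 0.0f ? i1[ch] * w[ch] : i1[ch];
  }
}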
| 4,833 | 35.900763 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-4x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_4x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vw89AB = vld1q_f32(w); w += 4;
const float32x4_t vwCDEF = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
float32x4_t vacc0x89AB = vmulq_f32(vi0x89AB, vw89AB);
const uint32x4_t vm0x89AB = vcltq_s32(vreinterpretq_s32_f32(vi0x89AB), vmovq_n_s32(0));
float32x4_t vacc0xCDEF = vmulq_f32(vi0xCDEF, vwCDEF);
const uint32x4_t vm0xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi0xCDEF), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc1x4567 = vmulq_f32(vi1x4567, vw4567);
const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0));
float32x4_t vacc1x89AB = vmulq_f32(vi1x89AB, vw89AB);
const uint32x4_t vm1x89AB = vcltq_s32(vreinterpretq_s32_f32(vi1x89AB), vmovq_n_s32(0));
float32x4_t vacc1xCDEF = vmulq_f32(vi1xCDEF, vwCDEF);
const uint32x4_t vm1xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi1xCDEF), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc2x4567 = vmulq_f32(vi2x4567, vw4567);
const uint32x4_t vm2x4567 = vcltq_s32(vreinterpretq_s32_f32(vi2x4567), vmovq_n_s32(0));
float32x4_t vacc2x89AB = vmulq_f32(vi2x89AB, vw89AB);
const uint32x4_t vm2x89AB = vcltq_s32(vreinterpretq_s32_f32(vi2x89AB), vmovq_n_s32(0));
float32x4_t vacc2xCDEF = vmulq_f32(vi2xCDEF, vwCDEF);
const uint32x4_t vm2xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi2xCDEF), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
float32x4_t vacc3x4567 = vmulq_f32(vi3x4567, vw4567);
const uint32x4_t vm3x4567 = vcltq_s32(vreinterpretq_s32_f32(vi3x4567), vmovq_n_s32(0));
float32x4_t vacc3x89AB = vmulq_f32(vi3x89AB, vw89AB);
const uint32x4_t vm3x89AB = vcltq_s32(vreinterpretq_s32_f32(vi3x89AB), vmovq_n_s32(0));
float32x4_t vacc3xCDEF = vmulq_f32(vi3xCDEF, vwCDEF);
const uint32x4_t vm3xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi3xCDEF), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vacc0x89AB = vbslq_f32(vm0x89AB, vacc0x89AB, vi0x89AB);
vacc0xCDEF = vbslq_f32(vm0xCDEF, vacc0xCDEF, vi0xCDEF);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc1x4567 = vbslq_f32(vm1x4567, vacc1x4567, vi1x4567);
vacc1x89AB = vbslq_f32(vm1x89AB, vacc1x89AB, vi1x89AB);
vacc1xCDEF = vbslq_f32(vm1xCDEF, vacc1xCDEF, vi1xCDEF);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc2x4567 = vbslq_f32(vm2x4567, vacc2x4567, vi2x4567);
vacc2x89AB = vbslq_f32(vm2x89AB, vacc2x89AB, vi2x89AB);
vacc2xCDEF = vbslq_f32(vm2xCDEF, vacc2xCDEF, vi2xCDEF);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
vacc3x4567 = vbslq_f32(vm3x4567, vacc3x4567, vi3x4567);
vacc3x89AB = vbslq_f32(vm3x89AB, vacc3x89AB, vi3x89AB);
vacc3xCDEF = vbslq_f32(vm3xCDEF, vacc3xCDEF, vi3xCDEF);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
vst1q_f32(o0, vacc0x89AB); o0 += 4;
vst1q_f32(o0, vacc0xCDEF); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o1, vacc1x4567); o1 += 4;
vst1q_f32(o1, vacc1x89AB); o1 += 4;
vst1q_f32(o1, vacc1xCDEF); o1 += 4;
vst1q_f32(o2, vacc2x0123); o2 += 4;
vst1q_f32(o2, vacc2x4567); o2 += 4;
vst1q_f32(o2, vacc2x89AB); o2 += 4;
vst1q_f32(o2, vacc2xCDEF); o2 += 4;
vst1q_f32(o3, vacc3x0123); o3 += 4;
vst1q_f32(o3, vacc3x4567); o3 += 4;
vst1q_f32(o3, vacc3x89AB); o3 += 4;
vst1q_f32(o3, vacc3xCDEF); o3 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 += 4;
const float32x4_t vi2x0123 = vld1q_f32(i2);
i2 += 4;
const float32x4_t vi3x0123 = vld1q_f32(i3);
i3 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o2, vacc2x0123); o2 += 4;
vst1q_f32(o3, vacc3x0123); o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const float32x4_t vi2x0123 = vld1q_f32(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const float32x4_t vi3x0123 = vld1q_f32(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vst1_f32(o2, vacc2x01); o2 += 2;
vst1_f32(o3, vacc3x01); o3 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
vst1_lane_f32(o2, vacc2x01, 0); o2 += 1;
vst1_lane_f32(o3, vacc3x01, 0); o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 11,033 | 45.167364 | 93 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_4x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
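  // PReLU with a per-channel slope: y = x for x >= 0 and y = x * w otherwise.
  // This variant processes 4 rows by 4 channels per main-loop iteration.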
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
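      // Compute x * w unconditionally, derive a lane mask from the sign bit via a
      // signed-integer compare of the raw bits against zero, and let vbslq_f32
      // keep the product for negative lanes and the original input elsewhere.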
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o2, vacc2x0123); o2 += 4;
vst1q_f32(o3, vacc3x0123); o3 += 4;
}
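    // Channel remainder (1 to 3 floats): load a full 4-wide vector (the kernel is
    // declared XNN_OOB_READS, so reading slightly past the row is permitted) and
    // store only the valid lanes with the partial stores below.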
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const float32x4_t vi2x0123 = vld1q_f32(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const float32x4_t vi3x0123 = vld1q_f32(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vst1_f32(o2, vacc2x01); o2 += 2;
vst1_f32(o3, vacc3x01); o3 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
vst1_lane_f32(o2, vacc2x01, 0); o2 += 1;
vst1_lane_f32(o3, vacc3x01, 0); o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 5,643 | 37.924138 | 93 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-neon-4x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__neon_4x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vw4567 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc1x4567 = vmulq_f32(vi1x4567, vw4567);
const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc2x4567 = vmulq_f32(vi2x4567, vw4567);
const uint32x4_t vm2x4567 = vcltq_s32(vreinterpretq_s32_f32(vi2x4567), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
float32x4_t vacc3x4567 = vmulq_f32(vi3x4567, vw4567);
const uint32x4_t vm3x4567 = vcltq_s32(vreinterpretq_s32_f32(vi3x4567), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc1x4567 = vbslq_f32(vm1x4567, vacc1x4567, vi1x4567);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc2x4567 = vbslq_f32(vm2x4567, vacc2x4567, vi2x4567);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
vacc3x4567 = vbslq_f32(vm3x4567, vacc3x4567, vi3x4567);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o0, vacc0x4567); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o1, vacc1x4567); o1 += 4;
vst1q_f32(o2, vacc2x0123); o2 += 4;
vst1q_f32(o2, vacc2x4567); o2 += 4;
vst1q_f32(o3, vacc3x0123); o3 += 4;
vst1q_f32(o3, vacc3x4567); o3 += 4;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 += 4;
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 += 4;
const float32x4_t vi2x0123 = vld1q_f32(i2);
i2 += 4;
const float32x4_t vi3x0123 = vld1q_f32(i3);
i3 += 4;
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
vst1q_f32(o0, vacc0x0123); o0 += 4;
vst1q_f32(o1, vacc1x0123); o1 += 4;
vst1q_f32(o2, vacc2x0123); o2 += 4;
vst1q_f32(o3, vacc3x0123); o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const float32x4_t vw0123 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const float32x4_t vi1x0123 = vld1q_f32(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const float32x4_t vi2x0123 = vld1q_f32(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const float32x4_t vi3x0123 = vld1q_f32(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));
vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
if (c & (2 * sizeof(float))) {
vst1_f32(o0, vacc0x01); o0 += 2;
vst1_f32(o1, vacc1x01); o1 += 2;
vst1_f32(o2, vacc2x01); o2 += 2;
vst1_f32(o3, vacc3x01); o3 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
}
if (c & (1 * sizeof(float))) {
vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
vst1_lane_f32(o2, vacc2x01, 0); o2 += 1;
vst1_lane_f32(o3, vacc3x01, 0); o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 8,384 | 41.563452 | 93 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-scalar-2x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__scalar_2x1(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
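    // If fewer than 2 rows remain, alias the second row's pointers to the first:
    // the duplicated row recomputes identical values into the same output, so the
    // extra work has no visible effect.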
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
do {
const float vw = *w++;
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vacc0 = XNN_UNPREDICTABLE(vi0 < 0.0f) ? vi0 * vw : vi0;
const float vacc1 = XNN_UNPREDICTABLE(vi1 < 0.0f) ? vi1 * vw : vi1;
*o0++ = vacc0;
*o1++ = vacc1;
c -= sizeof(float);
} while (c != 0);
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 1,725 | 25.151515 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-scalar-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__scalar_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
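    // Portable scalar variant, unrolled 2 rows by 4 channels. XNN_UNPREDICTABLE
    // marks the sign test as hard to predict, steering the compiler toward a
    // branchless select for the ternary.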
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float vw0 = w[0];
const float vw1 = w[1];
const float vw2 = w[2];
const float vw3 = w[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vacc0x0 = XNN_UNPREDICTABLE(vi0x0 < 0.0f) ? vi0x0 * vw0 : vi0x0;
const float vacc0x1 = XNN_UNPREDICTABLE(vi0x1 < 0.0f) ? vi0x1 * vw1 : vi0x1;
const float vacc0x2 = XNN_UNPREDICTABLE(vi0x2 < 0.0f) ? vi0x2 * vw2 : vi0x2;
const float vacc0x3 = XNN_UNPREDICTABLE(vi0x3 < 0.0f) ? vi0x3 * vw3 : vi0x3;
const float vacc1x0 = XNN_UNPREDICTABLE(vi1x0 < 0.0f) ? vi1x0 * vw0 : vi1x0;
const float vacc1x1 = XNN_UNPREDICTABLE(vi1x1 < 0.0f) ? vi1x1 * vw1 : vi1x1;
const float vacc1x2 = XNN_UNPREDICTABLE(vi1x2 < 0.0f) ? vi1x2 * vw2 : vi1x2;
const float vacc1x3 = XNN_UNPREDICTABLE(vi1x3 < 0.0f) ? vi1x3 * vw3 : vi1x3;
o0[0] = vacc0x0;
o0[1] = vacc0x1;
o0[2] = vacc0x2;
o0[3] = vacc0x3;
o0 += 4;
o1[0] = vacc1x0;
o1[1] = vacc1x1;
o1[2] = vacc1x2;
o1[3] = vacc1x3;
o1 += 4;
w += 4;
}
for (; c != 0; c -= sizeof(float)) {
const float vw = *w++;
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vacc0 = XNN_UNPREDICTABLE(vi0 < 0.0f) ? vi0 * vw : vi0;
const float vacc1 = XNN_UNPREDICTABLE(vi1 < 0.0f) ? vi1 * vw : vi1;
*o0++ = vacc0;
*o1++ = vacc1;
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,091 | 29.019417 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const __m128 vzero = _mm_setzero_ps();
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
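    // SSE variant: split the input with max(x, 0) and min(x, 0), then compute
    // y = max(x, 0) + min(x, 0) * w, which leaves non-negative lanes unchanged
    // and scales negative lanes by the per-channel slope.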
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
__m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vacc0x0123 = _mm_max_ps(_mm_setzero_ps(), vi0x0123);
vi0x0123 = _mm_min_ps(vi0x0123, vzero);
__m128 vacc1x0123 = _mm_max_ps(_mm_setzero_ps(), vi1x0123);
vi1x0123 = _mm_min_ps(vi1x0123, vzero);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(vi0x0123, vw0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(vi1x0123, vw0123));
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
__m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
__m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
__m128 vacc0x0123 = _mm_max_ps(_mm_setzero_ps(), vi0x0123);
vi0x0123 = _mm_min_ps(vi0x0123, vzero);
__m128 vacc1x0123 = _mm_max_ps(_mm_setzero_ps(), vi1x0123);
vi1x0123 = _mm_min_ps(vi1x0123, vzero);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(vi0x0123, vw0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(vi1x0123, vw0123));
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,306 | 28.526786 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const __m128 vzero = _mm_setzero_ps();
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
const __m128 vw4567 = _mm_load_ps(w + 4);
w += 8;
__m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vacc0x0123 = _mm_max_ps(_mm_setzero_ps(), vi0x0123);
vi0x0123 = _mm_min_ps(vi0x0123, vzero);
__m128 vacc0x4567 = _mm_max_ps(_mm_setzero_ps(), vi0x4567);
vi0x4567 = _mm_min_ps(vi0x4567, vzero);
__m128 vacc1x0123 = _mm_max_ps(_mm_setzero_ps(), vi1x0123);
vi1x0123 = _mm_min_ps(vi1x0123, vzero);
__m128 vacc1x4567 = _mm_max_ps(_mm_setzero_ps(), vi1x4567);
vi1x4567 = _mm_min_ps(vi1x4567, vzero);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(vi0x0123, vw0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(vi0x4567, vw4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(vi1x0123, vw0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(vi1x4567, vw4567));
_mm_storeu_ps(o0, vacc0x0123);
_mm_storeu_ps(o0 + 4, vacc0x4567);
o0 += 8;
_mm_storeu_ps(o1, vacc1x0123);
_mm_storeu_ps(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
__m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vacc0x0123 = _mm_max_ps(_mm_setzero_ps(), vi0x0123);
vi0x0123 = _mm_min_ps(vi0x0123, vzero);
__m128 vacc1x0123 = _mm_max_ps(_mm_setzero_ps(), vi1x0123);
vi1x0123 = _mm_min_ps(vi1x0123, vzero);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(vi0x0123, vw0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(vi1x0123, vw0123));
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
__m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
__m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
__m128 vacc0x0123 = _mm_max_ps(_mm_setzero_ps(), vi0x0123);
vi0x0123 = _mm_min_ps(vi0x0123, vzero);
__m128 vacc1x0123 = _mm_max_ps(_mm_setzero_ps(), vi1x0123);
vi1x0123 = _mm_min_ps(vi1x0123, vzero);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(vi0x0123, vw0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(vi1x0123, vw0123));
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,615 | 30.834483 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse2-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse2_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
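    // SSE2 variant: build a full-width lane mask from the sign bit (integer
    // compare 0 > bits(x)) and blend with and/andnot/or, selecting x * w for
    // negative lanes and x itself for the rest; SSE2 has no blendv instruction.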
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
const __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
const __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
__m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
__m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,734 | 32.648649 | 118 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse2-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse2_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
const __m128 vw4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
const __m128 vprod0x4567 = _mm_mul_ps(vi0x4567, vw4567);
const __m128 vmask0x4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x4567)));
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
const __m128 vprod1x4567 = _mm_mul_ps(vi1x4567, vw4567);
const __m128 vmask1x4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x4567)));
const __m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
const __m128 vacc0x4567 = _mm_or_ps(_mm_and_ps(vprod0x4567, vmask0x4567), _mm_andnot_ps(vmask0x4567, vi0x4567));
const __m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
const __m128 vacc1x4567 = _mm_or_ps(_mm_and_ps(vprod1x4567, vmask1x4567), _mm_andnot_ps(vmask1x4567, vi1x4567));
_mm_storeu_ps(o0, vacc0x0123);
_mm_storeu_ps(o0 + 4, vacc0x4567);
o0 += 8;
_mm_storeu_ps(o1, vacc1x0123);
_mm_storeu_ps(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
__m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
__m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vmask0x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi0x0123)));
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vmask1x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi1x0123)));
__m128 vacc0x0123 = _mm_or_ps(_mm_and_ps(vprod0x0123, vmask0x0123), _mm_andnot_ps(vmask0x0123, vi0x0123));
__m128 vacc1x0123 = _mm_or_ps(_mm_and_ps(vprod1x0123, vmask1x0123), _mm_andnot_ps(vmask1x0123, vi1x0123));
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 5,511 | 37.277778 | 118 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse41-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse41_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
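    // SSE4.1 variant: _mm_blendv_ps selects per lane on the sign bit of its third
    // operand, so passing the input itself as the selector picks x * w for
    // negative lanes and passes non-negative lanes through unchanged.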
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
const __m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
__m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
__m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,111 | 28.084112 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-sse41-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__sse41_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
const __m128 vw4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vprod0x4567 = _mm_mul_ps(vi0x4567, vw4567);
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
const __m128 vprod1x4567 = _mm_mul_ps(vi1x4567, vw4567);
const __m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
const __m128 vacc0x4567 = _mm_blendv_ps(vi0x4567, vprod0x4567, vi0x4567);
const __m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);
const __m128 vacc1x4567 = _mm_blendv_ps(vi1x4567, vprod1x4567, vi1x4567);
_mm_storeu_ps(o0, vacc0x0123);
_mm_storeu_ps(o0 + 4, vacc0x4567);
o0 += 8;
_mm_storeu_ps(o1, vacc1x0123);
_mm_storeu_ps(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const __m128 vw0123 = _mm_load_ps(w);
w += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
__m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
__m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);
_mm_storeu_ps(o0, vacc0x0123);
o0 += 4;
_mm_storeu_ps(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const __m128 vw0123 = _mm_load_ps(w);
w = (const float*) ((uintptr_t) w + c);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);
__m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
__m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);
if (c & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vacc0x0123);
_mm_storel_pi((__m64*) o1, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
_mm_store_ss(o0, vacc0x0123);
_mm_store_ss(o1, vacc1x0123);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,264 | 30.360294 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasm-2x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasm_2x1(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float vzero = 0.0f;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
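    // Scalar WAsm variant of the same split used by the SIMD kernels:
    // acc = max(x, 0) + min(x, 0) * w, written with the wasm min/max builtins so
    // it lowers to f32.min/f32.max rather than branches.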
do {
const float vw = *w++;
float vi0 = *i0++;
float vi1 = *i1++;
float vacc0 = __builtin_wasm_max_f32(vi0, vzero);
vi0 = __builtin_wasm_min_f32(vi0, vzero);
float vacc1 = __builtin_wasm_max_f32(vi1, vzero);
vi1 = __builtin_wasm_min_f32(vi1, vzero);
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
*o0++ = vacc0;
*o1++ = vacc1;
c -= sizeof(float);
} while (c != 0);
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 1,848 | 24.680556 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasm-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasm_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride)
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float vzero = 0.0f;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const float vw0 = w[0];
const float vw1 = w[1];
const float vw2 = w[2];
const float vw3 = w[3];
float vi0x0 = i0[0];
float vi0x1 = i0[1];
float vi0x2 = i0[2];
float vi0x3 = i0[3];
i0 += 4;
float vi1x0 = i1[0];
float vi1x1 = i1[1];
float vi1x2 = i1[2];
float vi1x3 = i1[3];
i1 += 4;
float vacc0x0 = __builtin_wasm_max_f32(vi0x0, vzero);
vi0x0 = __builtin_wasm_min_f32(vi0x0, vzero);
float vacc0x1 = __builtin_wasm_max_f32(vi0x1, vzero);
vi0x1 = __builtin_wasm_min_f32(vi0x1, vzero);
float vacc0x2 = __builtin_wasm_max_f32(vi0x2, vzero);
vi0x2 = __builtin_wasm_min_f32(vi0x2, vzero);
float vacc0x3 = __builtin_wasm_max_f32(vi0x3, vzero);
vi0x3 = __builtin_wasm_min_f32(vi0x3, vzero);
float vacc1x0 = __builtin_wasm_max_f32(vi1x0, vzero);
vi1x0 = __builtin_wasm_min_f32(vi1x0, vzero);
float vacc1x1 = __builtin_wasm_max_f32(vi1x1, vzero);
vi1x1 = __builtin_wasm_min_f32(vi1x1, vzero);
float vacc1x2 = __builtin_wasm_max_f32(vi1x2, vzero);
vi1x2 = __builtin_wasm_min_f32(vi1x2, vzero);
float vacc1x3 = __builtin_wasm_max_f32(vi1x3, vzero);
vi1x3 = __builtin_wasm_min_f32(vi1x3, vzero);
vacc0x0 += vi0x0 * vw0;
vacc0x1 += vi0x1 * vw1;
vacc0x2 += vi0x2 * vw2;
vacc0x3 += vi0x3 * vw3;
vacc1x0 += vi1x0 * vw0;
vacc1x1 += vi1x1 * vw1;
vacc1x2 += vi1x2 * vw2;
vacc1x3 += vi1x3 * vw3;
o0[0] = vacc0x0;
o0[1] = vacc0x1;
o0[2] = vacc0x2;
o0[3] = vacc0x3;
o0 += 4;
o1[0] = vacc1x0;
o1[1] = vacc1x1;
o1[2] = vacc1x2;
o1[3] = vacc1x3;
o1 += 4;
w += 4;
}
for (; c != 0; c -= sizeof(float)) {
const float vw = *w++;
float vi0 = *i0++;
float vi1 = *i1++;
float vacc0 = __builtin_wasm_max_f32(vi0, vzero);
vi0 = __builtin_wasm_min_f32(vi0, vzero);
float vacc1 = __builtin_wasm_max_f32(vi1, vzero);
vi1 = __builtin_wasm_min_f32(vi1, vzero);
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
*o0++ = vacc0;
*o1++ = vacc1;
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,639 | 27.888889 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-1x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
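    // "iminmax" variant: the sign split uses integer min/max against zero, which
    // works because non-negative float bit patterns are >= 0 and negative ones
    // are < 0 when viewed as signed 32-bit integers; the scaled negative part is
    // then accumulated with the relaxed-SIMD fused multiply-add builtin.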
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
vacc0x89AB = __builtin_wasm_relaxed_madd_f32x4(vi0x89AB, vw89AB, vacc0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi0xCDEF, vwCDEF, vacc0xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,796 | 30.641667 | 83 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 2,344 | 25.954023 | 83 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-1x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_1x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,117 | 27.87037 | 83 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-2x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
v128_t vi1x89AB = wasm_v128_load(i1 + 8);
v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc1x89AB = wasm_i32x4_max(vi1x89AB, vzero);
vi1x89AB = wasm_i32x4_min(vi1x89AB, vzero);
v128_t vacc1xCDEF = wasm_i32x4_max(vi1xCDEF, vzero);
vi1xCDEF = wasm_i32x4_min(vi1xCDEF, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
vacc0x89AB = __builtin_wasm_relaxed_madd_f32x4(vi0x89AB, vw89AB, vacc0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi0xCDEF, vwCDEF, vacc0xCDEF);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vw4567, vacc1x4567);
vacc1x89AB = __builtin_wasm_relaxed_madd_f32x4(vi1x89AB, vw89AB, vacc1x89AB);
vacc1xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi1xCDEF, vwCDEF, vacc1xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,069 | 35.347305 | 83 |
c
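A minimal, hypothetical driver for the 2x16 iminmax kernel above (author's sketch, not an XNNPACK source file): it illustrates that `channels` and both strides are passed in bytes while `weights` holds one float per channel, and the buffers are sized so the kernel stays within bounds. Building it assumes an XNNPACK checkout and a WebAssembly toolchain with relaxed SIMD enabled.
// Hypothetical driver -- not part of the generated sources.
#include <stdio.h>
#include <xnnpack/prelu.h>
int main(void) {
  enum { kRows = 2, kChannels = 20 };  // 20 floats = one 16-wide pass + one 4-wide pass
  float input[kRows * kChannels];
  float weights[kChannels];
  float output[kRows * kChannels];
  for (int i = 0; i < kRows * kChannels; i++) {
    input[i] = (i % 2 == 0) ? -(float) i : (float) i;  // mix of negative and positive lanes
  }
  for (int c = 0; c < kChannels; c++) {
    weights[c] = 0.25f;  // per-channel negative slope
  }
  xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x16(
      /*rows=*/kRows,
      /*channels=*/kChannels * sizeof(float),              // channels in bytes
      input, /*input_stride=*/kChannels * sizeof(float),   // byte stride between rows
      weights,
      output, /*output_stride=*/kChannels * sizeof(float));
  printf("output[0]=%f output[20]=%f\n", (double) output[0], (double) output[20]);
  return 0;
}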
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-2x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,455 | 29.857143 | 83 |
c
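For reference, a plain scalar model (author's illustration, not a generated kernel) of the decomposition the iminmax kernels above vectorize: the positive part of each lane passes through, the negative part is scaled by the per-channel weight, and the two are recombined with a multiply-add; in the SIMD code the positive/negative split is done with integer min/max against zero on the float bit patterns.
// Author's scalar sketch of the iminmax PReLU decomposition.
#include <stddef.h>
static inline float prelu_iminmax_scalar(float x, float w) {
  const float pos = x > 0.0f ? x : 0.0f;  // role of wasm_i32x4_max(x, vzero)
  const float neg = x < 0.0f ? x : 0.0f;  // role of wasm_i32x4_min(x, vzero)
  return pos + w * neg;                   // role of __builtin_wasm_relaxed_madd_f32x4
}
static void prelu_reference(size_t rows, size_t channels,
                            const float* input, const float* weights, float* output) {
  // Dense rows x channels reference loop; counts are in elements, not bytes.
  for (size_t r = 0; r < rows; r++) {
    for (size_t c = 0; c < channels; c++) {
      output[r * channels + c] = prelu_iminmax_scalar(input[r * channels + c], weights[c]);
    }
  }
}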
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-2x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vw4567, vacc1x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,818 | 32.234483 | 83 |
c
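A short note on the row loop shared by the kernels above: two rows are peeled per iteration, and when only one row remains the `rows < 2` branch aliases the second row's pointers onto the first, so the duplicated work is harmless. `doz` from <xnnpack/math.h> behaves as a saturating subtraction; a scalar model of that behavior (author's sketch, the identifier `doz_model` is hypothetical):
#include <stddef.h>
// Difference-or-zero: never wraps below zero, so the do/while loop terminates
// exactly when all rows have been consumed.
static inline size_t doz_model(size_t a, size_t b) {
  return a >= b ? a - b : 0;
}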
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-4x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
v128_t vi1x89AB = wasm_v128_load(i1 + 8);
v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vi2x4567 = wasm_v128_load(i2 + 4);
v128_t vi2x89AB = wasm_v128_load(i2 + 8);
v128_t vi2xCDEF = wasm_v128_load(i2 + 12);
i2 += 16;
v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vi3x4567 = wasm_v128_load(i3 + 4);
v128_t vi3x89AB = wasm_v128_load(i3 + 8);
v128_t vi3xCDEF = wasm_v128_load(i3 + 12);
i3 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc1x89AB = wasm_i32x4_max(vi1x89AB, vzero);
vi1x89AB = wasm_i32x4_min(vi1x89AB, vzero);
v128_t vacc1xCDEF = wasm_i32x4_max(vi1xCDEF, vzero);
vi1xCDEF = wasm_i32x4_min(vi1xCDEF, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc2x4567 = wasm_i32x4_max(vi2x4567, vzero);
vi2x4567 = wasm_i32x4_min(vi2x4567, vzero);
v128_t vacc2x89AB = wasm_i32x4_max(vi2x89AB, vzero);
vi2x89AB = wasm_i32x4_min(vi2x89AB, vzero);
v128_t vacc2xCDEF = wasm_i32x4_max(vi2xCDEF, vzero);
vi2xCDEF = wasm_i32x4_min(vi2xCDEF, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
v128_t vacc3x4567 = wasm_i32x4_max(vi3x4567, vzero);
vi3x4567 = wasm_i32x4_min(vi3x4567, vzero);
v128_t vacc3x89AB = wasm_i32x4_max(vi3x89AB, vzero);
vi3x89AB = wasm_i32x4_min(vi3x89AB, vzero);
v128_t vacc3xCDEF = wasm_i32x4_max(vi3xCDEF, vzero);
vi3xCDEF = wasm_i32x4_min(vi3xCDEF, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
vacc0x89AB = __builtin_wasm_relaxed_madd_f32x4(vi0x89AB, vw89AB, vacc0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi0xCDEF, vwCDEF, vacc0xCDEF);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vw4567, vacc1x4567);
vacc1x89AB = __builtin_wasm_relaxed_madd_f32x4(vi1x89AB, vw89AB, vacc1x89AB);
vacc1xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi1xCDEF, vwCDEF, vacc1xCDEF);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vw4567, vacc2x4567);
vacc2x89AB = __builtin_wasm_relaxed_madd_f32x4(vi2x89AB, vw89AB, vacc2x89AB);
vacc2xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi2xCDEF, vwCDEF, vacc2xCDEF);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vw4567, vacc3x4567);
vacc3x89AB = __builtin_wasm_relaxed_madd_f32x4(vi3x89AB, vw89AB, vacc3x89AB);
vacc3xCDEF = __builtin_wasm_relaxed_madd_f32x4(vi3xCDEF, vwCDEF, vacc3xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
wasm_v128_store(o2 + 8, vacc2x89AB);
wasm_v128_store(o2 + 12, vacc2xCDEF);
o2 += 16;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
wasm_v128_store(o3 + 8, vacc3x89AB);
wasm_v128_store(o3 + 12, vacc3xCDEF);
o3 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 10,616 | 39.678161 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-4x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 5,678 | 34.055556 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-iminmax-4x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_4x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc2x4567 = wasm_i32x4_max(vi2x4567, vzero);
vi2x4567 = wasm_i32x4_min(vi2x4567, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
v128_t vacc3x4567 = wasm_i32x4_max(vi3x4567, vzero);
vi3x4567 = wasm_i32x4_min(vi3x4567, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vw4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vw4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vw4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vw4567, vacc3x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
o2 += 8;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
o3 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vw0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vw0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vw0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vw0123, vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 8,221 | 36.543379 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-1x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,938 | 32.10084 | 94 |
c
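The laneselect kernels above take a different route from the iminmax ones: each lane is unconditionally multiplied by its weight, an all-ones/all-zeros mask is derived from the sign bit via wasm_i32x4_shr(x, 31), and the relaxed lane-select then keeps x*w for negative lanes and x otherwise. A scalar model (author's illustration, not a generated kernel):
#include <stdint.h>
#include <string.h>
static inline float prelu_laneselect_scalar(float x, float w) {
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));          // reinterpret the float's bit pattern
  const int32_t mask = bits < 0 ? -1 : 0;   // all ones for negative x; the kernels
                                            // derive this with an arithmetic shift by 31
  return mask != 0 ? x * w : x;             // role of relaxed_laneselect(x*w, x, mask)
}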
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 2,362 | 26.476744 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-1x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_1x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,197 | 28.88785 | 94 |
c