repo (string, 1–152 chars, may be null) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x2c4__sse(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
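  // Row output pointers: when mr < 4, the pointers for the unused rows alias the
  // previous row, so the kernel can unconditionally compute and store 4 rows; the
  // aliased stores are later overwritten by the valid row, which is stored after them.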
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
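    // The packed weights w start with the 2 bias values for this column block; each bias
    // is loaded into lane 0 of a 4-lane partial-sum accumulator (the "c4" suffix), and the
    // remaining lanes stay zero until the final horizontal reduction.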
__m128 vacc0x0c4 = _mm_load_ss(w);
__m128 vacc0x1c4 = _mm_load_ss(w + 1);
__m128 vacc1x0c4 = vacc0x0c4;
__m128 vacc1x1c4 = vacc0x1c4;
__m128 vacc2x0c4 = vacc0x0c4;
__m128 vacc2x1c4 = vacc0x1c4;
__m128 vacc3x0c4 = vacc0x0c4;
__m128 vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 vb0 = _mm_loadu_ps(w);
const __m128 vb1 = _mm_loadu_ps(w + 4);
w += 8;
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(va0, vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(va0, vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(va1, vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(va1, vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(va2, vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(va2, vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(va3, vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(va3, vb1));
}
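      // Remainder of 1-3 channels: full 4-float vectors are still loaded (the kernel is
      // annotated XNN_OOB_READS) and the packed weights are zero-padded, so activation
      // lanes whose weight is zero are masked off to keep any out-of-bounds garbage
      // (potentially Inf/NaN) from contaminating the products.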
if XNN_UNLIKELY(k != 0) {
const __m128 va0 = _mm_loadu_ps(a0);
const __m128 va1 = _mm_loadu_ps(a1);
const __m128 va2 = _mm_loadu_ps(a2);
const __m128 va3 = _mm_loadu_ps(a3);
const __m128 vb0 = _mm_loadu_ps(w);
const __m128 vb1 = _mm_loadu_ps(w + 4);
w += 8;
const __m128 vmask0 = _mm_cmpeq_ps(_mm_setzero_ps(), vb0);
const __m128 vmask1 = _mm_cmpeq_ps(_mm_setzero_ps(), vb1);
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va0), vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va0), vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va1), vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va1), vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va2), vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va2), vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va3), vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va3), vb1));
}
p -= 4 * sizeof(void*);
} while (p != 0);
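    // Horizontal reduction: collapse each row's two 4-lane partial sums into scalars,
    // leaving vacc01x01 = [c0(row0), c1(row0), c0(row1), c1(row1)] and vacc23x01 holding
    // the same layout for rows 2 and 3.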
const __m128 vacc0x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc0x0c4, vacc0x1c4), _mm_unpackhi_ps(vacc0x0c4, vacc0x1c4));
const __m128 vacc1x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc1x0c4, vacc1x1c4), _mm_unpackhi_ps(vacc1x0c4, vacc1x1c4));
const __m128 vacc2x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc2x0c4, vacc2x1c4), _mm_unpackhi_ps(vacc2x0c4, vacc2x1c4));
const __m128 vacc3x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc3x0c4, vacc3x1c4), _mm_unpackhi_ps(vacc3x0c4, vacc3x1c4));
__m128 vacc01x01 = _mm_add_ps(_mm_movelh_ps(vacc0x01c2, vacc1x01c2), _mm_movehl_ps(vacc1x01c2, vacc0x01c2));
__m128 vacc23x01 = _mm_add_ps(_mm_movelh_ps(vacc2x01c2, vacc3x01c2), _mm_movehl_ps(vacc3x01c2, vacc2x01c2));
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc01x01 = _mm_min_ps(vacc01x01, vmax);
vacc23x01 = _mm_min_ps(vacc23x01, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc01x01 = _mm_max_ps(vacc01x01, vmin);
vacc23x01 = _mm_max_ps(vacc23x01, vmin);
if XNN_LIKELY(nc >= 2) {
_mm_storeh_pi((__m64*) c3, vacc23x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storel_pi((__m64*) c2, vacc23x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeh_pi((__m64*) c1, vacc01x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storel_pi((__m64*) c0, vacc01x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
_mm_store_ss(c3, _mm_movehl_ps(vacc23x01, vacc23x01));
_mm_store_ss(c2, vacc23x01);
_mm_store_ss(c1, _mm_movehl_ps(vacc01x01, vacc01x01));
_mm_store_ss(c0, vacc01x01);
nc = 0;
}
} while (nc != 0);
}
| 6,301 | 34.011111 | 119 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
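        // Relaxed-SIMD multiply-add: the engine may lower this to either a fused or an
        // unfused FMA, so results can differ in the last bit from the plain wasmsimd kernel.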
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
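    // Relaxed min/max: the result is implementation-defined when an input is NaN or when
    // -0.0 is compared against +0.0, which lets the engine emit a single native min/max.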
vacc01x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc23x01);
vacc01x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 7,026 | 35.409326 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x2c4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
vacc01x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc23x01);
vacc01x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,974 | 35.139896 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x2c4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
vacc01x01 = wasm_f32x4_max(vmin, vacc01x01);
vacc23x01 = wasm_f32x4_max(vmin, vacc23x01);
vacc01x01 = wasm_f32x4_min(vmax, vacc01x01);
vacc23x01 = wasm_f32x4_min(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,899 | 34.751295 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x2c4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
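    // The x86 variant clamps with pmin/pmax, which follow the operand-ordering semantics of
    // the SSE minps/maxps instructions and lower to one instruction each, unlike the
    // NaN-propagating f32x4_min/max used in the -arm variant.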
vacc01x01 = wasm_f32x4_pmax(vmin, vacc01x01);
vacc23x01 = wasm_f32x4_pmax(vmin, vacc23x01);
vacc01x01 = wasm_f32x4_pmin(vmax, vacc01x01);
vacc23x01 = wasm_f32x4_pmin(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,903 | 34.772021 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-relu-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
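    // ReLU via integer max: non-negative floats order the same as their bit patterns and
    // negative floats map to negative integers, so i32x4_max(x, 0) clamps at zero without a
    // floating-point compare (and flushes -0.0 to +0.0).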
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc01x01 = wasm_i32x4_max(vacc01x01, vzero);
vacc23x01 = wasm_i32x4_max(vacc23x01, vzero);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,769 | 34.820106 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-relu-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x2c4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc01x01 = wasm_i32x4_max(vacc01x01, vzero);
vacc23x01 = wasm_i32x4_max(vacc23x01, vzero);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,710 | 34.507937 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,615 | 34.569892 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x2c4-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x2c4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
const v128_t va1 = wasm_v128_load(a1);
const v128_t va2 = wasm_v128_load(a2);
const v128_t va3 = wasm_v128_load(a3);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,556 | 34.252688 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x4__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc3x0123 = vacc0x0123;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
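      // Main loop: vld1_f32 reads 2 activations per row, and vfmaq_lane_f32 broadcasts
      // each of those lanes against a 4-wide weight vector in a fused multiply-add.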
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
if XNN_LIKELY(nc >= 4) {
vst1q_f32(c3, vacc3x0123);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,568 | 30.642045 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x4__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc3x0123 = vacc0x0123;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
if XNN_LIKELY(nc >= 4) {
vst1q_f32(c3, vacc3x0123);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,557 | 30.579545 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_f32_igemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
w += 4;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc22 = math_max_f32(vacc22, vmin);
vacc23 = math_max_f32(vacc23, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc32 = math_max_f32(vacc32, vmin);
vacc33 = math_max_f32(vacc33, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc22 = math_min_f32(vacc22, vmax);
vacc23 = math_min_f32(vacc23, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
vacc32 = math_min_f32(vacc32, vmax);
vacc33 = math_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
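/*
 * Editor's note (hedged sketch): a standalone plain-C reference model of what
 * the IGEMM min/max microkernels above compute.  The function and its
 * parameter names are illustrative only (not an XNNPACK API).  Sizes are in
 * elements here, whereas the microkernels take kc, ks, a_offset and the
 * strides in bytes.  The indirection buffer `a` supplies mr input-row pointers
 * per kernel step; a pointer equal to `zero` is used as-is (a padding row),
 * every other pointer is advanced by `a_offset`.  Accumulators start from the
 * per-column bias at the head of the packed weights `w`, which is followed by
 * one row of `nr` weights per k within each step.
 */
#include <stddef.h>

static void reference_f32_igemm_minmax(
    size_t mr, size_t nr, size_t kc, size_t ks,
    const float** a, const float* w, float* c, size_t cm_stride,
    size_t a_offset, const float* zero, float vmin, float vmax)
{
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nr; n++) {
      float acc = w[n];  // bias for output column n
      const float* wk = w + nr;
      for (size_t s = 0; s < ks; s++) {
        const float* am = a[s * mr + m];
        if (am != zero) {
          am += a_offset;  // the real kernels add a byte offset instead
        }
        for (size_t k = 0; k < kc; k++) {
          acc += am[k] * wk[k * nr + n];
        }
        wk += kc * nr;  // the next step's weights follow contiguously
      }
      acc = acc > vmin ? acc : vmin;  // lower clamp first, as in the kernels
      acc = acc < vmax ? acc : vmax;
      c[m * cm_stride + n] = acc;
    }
  }
}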
| 6,347 | 27.339286 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_f32_igemm_minmax_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
w += 4;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
vacc33 = __builtin_wasm_max_f32(vacc33, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
vacc33 = __builtin_wasm_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 6,665 | 28.758929 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-relu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_f32_igemm_relu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
w += 4;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc12 = math_max_f32(vacc12, 0.0f);
vacc13 = math_max_f32(vacc13, 0.0f);
vacc20 = math_max_f32(vacc20, 0.0f);
vacc21 = math_max_f32(vacc21, 0.0f);
vacc22 = math_max_f32(vacc22, 0.0f);
vacc23 = math_max_f32(vacc23, 0.0f);
vacc30 = math_max_f32(vacc30, 0.0f);
vacc31 = math_max_f32(vacc31, 0.0f);
vacc32 = math_max_f32(vacc32, 0.0f);
vacc33 = math_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
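/*
 * Editor's note (hedged): the ReLU kernels above follow the same structure as
 * the min/max kernels earlier in this listing, but keep only the lower clamp,
 * fixed at 0.0f, and apply no upper bound.  In scalar terms:
 *
 *   acc = math_max_f32(acc, 0.0f);                       // ReLU
 * versus
 *   acc = math_min_f32(math_max_f32(acc, vmin), vmax);   // min/max
 */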
| 5,604 | 26.341463 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-relu-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_f32_igemm_relu_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
w += 4;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f);
vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f);
vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f);
vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f);
vacc22 = __builtin_wasm_max_f32(vacc22, 0.0f);
vacc23 = __builtin_wasm_max_f32(vacc23, 0.0f);
vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f);
vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f);
vacc32 = __builtin_wasm_max_f32(vacc32, 0.0f);
vacc33 = __builtin_wasm_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,762 | 27.112195 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x4-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_f32_igemm_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
w += 4;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
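/*
 * Editor's note (hedged sketch): a minimal driver for the plain scalar ukernel
 * above, assuming the XNNPACK source tree is on the include path so that
 * <xnnpack/igemm.h> declares the function.  kc, ks and the output strides are
 * passed in bytes, matching the kernel's asserts.  The kernel never reads its
 * params argument, so a null pointer is passed here; all buffer contents are
 * arbitrary illustrative values.
 */
#include <stdio.h>
#include <xnnpack/igemm.h>

int main(void) {
  // A is 4x2 and is addressed through the indirection buffer: one pointer per
  // output row for the single kernel step used here (ks = 4 pointers' worth).
  const float a0[2] = {1.0f, 2.0f};
  const float a1[2] = {3.0f, 4.0f};
  const float a2[2] = {5.0f, 6.0f};
  const float a3[2] = {7.0f, 8.0f};
  const float* a[4] = {a0, a1, a2, a3};
  // Packed weights: 4 bias values, then one 4-wide row of B per k (kc = 2).
  const float w[4 + 2 * 4] = {
    0.5f, 0.5f, 0.5f, 0.5f,   // bias
    1.0f, 0.0f, 2.0f, 0.0f,   // B[k=0][0..3]
    0.0f, 1.0f, 0.0f, 2.0f,   // B[k=1][0..3]
  };
  float c[4 * 4];
  const float zero[2] = {0.0f, 0.0f};  // padding row; unused in this call
  xnn_f32_igemm_ukernel_4x4__scalar(
      /*mr=*/4, /*nc=*/4, /*kc=*/2 * sizeof(float), /*ks=*/4 * sizeof(void*),
      a, w, c,
      /*cm_stride=*/4 * sizeof(float), /*cn_stride=*/4 * sizeof(float),
      /*a_offset=*/0, zero, /*params=*/NULL);
  for (int m = 0; m < 4; m++) {
    printf("%g %g %g %g\n", c[4 * m], c[4 * m + 1], c[4 * m + 2], c[4 * m + 3]);
  }
  return 0;
}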
| 4,946 | 25.174603 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-aarch64-neonfma-lane-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,452 | 37.116935 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,534 | 33.56422 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,678 | 30.032787 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
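/*
 * Editor's note (hedged): this FMA3 kernel is structurally identical to the
 * AVX broadcast kernel above; only the accumulation step differs.  The fused
 * form rounds once per operation, so the two variants may differ in the last
 * bit of the result:
 *
 *   acc = _mm256_add_ps(acc, _mm256_mul_ps(va, vb));   // AVX: two roundings
 *   acc = _mm256_fmadd_ps(va, vb, acc);                // FMA3: one rounding
 */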
| 5,627 | 29.754098 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neon-dup-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
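      // Main loop: 128-bit loads pull 4 A elements per row; each lane is duplicated
      // into a full vector before the multiply-accumulate.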
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
}
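      // Remainder loop for kc not a multiple of 4: one K column at a time.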
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,944 | 36.670455 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
}
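      // Remainder: kc is odd, so exactly one K column is left; broadcast it and accumulate once.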
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,922 | 34.057522 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neon-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neon_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
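      // Main loop: 128-bit A loads; vmlaq_lane_f32 selects each A lane directly,
      // avoiding the explicit vdupq_lane_f32 of the "dup" variants.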
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,441 | 37.072581 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
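      // Main loop: 64-bit A loads (2 K columns per iteration) with lane-indexed
      // multiply-accumulates.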
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,523 | 33.513761 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neonfma-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
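      // Same structure as the NEON "dup" ld128 kernel, but with vfmaq_f32 fused
      // multiply-adds instead of vmlaq_f32.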
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,947 | 36.681818 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
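      // Same structure as the NEON "dup" ld64 kernel, but with vfmaq_f32 fused
      // multiply-adds instead of vmlaq_f32.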
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,925 | 34.070796 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
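      // Main loop: 4 A elements per row; _mm_shuffle_ps broadcasts each element
      // across a vector before the multiply.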
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,699 | 36.676056 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-sse-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
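      // Inner K loop: broadcast one A element per row with _mm_load1_ps and
      // accumulate it against two 4-wide panels of B per iteration.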
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
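    // Clamp the accumulators to the requested [min, max] output range.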
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,048 | 29.245 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
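      // Main loop: process 4 K elements at a time; each A lane is broadcast
      // with a shuffle and multiplied against its own pair of B panels.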
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
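        // Remainder loop: handle the last 1-3 K elements one at a time.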
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 11,108 | 38.116197 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
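      // Inner K loop: broadcast one A element per row and accumulate with
      // relaxed fused multiply-add (__builtin_wasm_relaxed_madd_f32x4).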
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,732 | 32.665 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
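      // Main loop: load 4 A elements per row, broadcast each lane with a
      // shuffle, and accumulate with relaxed fused multiply-add.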
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
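      // Fewer than 8 output columns remain: store them in 4/2/1-wide chunks.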
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,471 | 39.971429 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmrelaxedsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
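      // Inner K loop: separate multiply and add (no FMA); the relaxed
      // instructions are only used for the min/max clamping below.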
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,704 | 32.525 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmrelaxedsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,347 | 39.528571 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmsimd-arm-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
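    // Clamp with the standard (non-relaxed) WASM SIMD min/max.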
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,413 | 31.07 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmsimd-arm-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
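      // K remainder (kc not a multiple of 4): fall back to broadcasting one
      // A element per row per iteration against the full 8-wide B columns.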
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
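    // Clamp the accumulators to the [min, max] interval supplied in params.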
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
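    // Common case: a full 4x8 tile remains. Rows are stored from c3 down to c0,
    // each row pointer advances by cn_stride, and the indirection pointer rewinds
    // by ks so the same A entries are reused for the next block of 8 columns.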
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
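      // Fewer than 8 columns left: emit the tail in chunks of 4, 2, and 1 floats
      // per row, shifting the surviving lanes down after each partial store.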
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
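      // loadsplat variant: every iteration broadcasts a single A element per row
      // and multiplies it against two 4-wide B vectors covering 8 output columns.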
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
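    // The x86-flavored kernel clamps with pmin/pmax, which lower to single
    // minps/maxps instructions on x86 targets (hence the _x86_ suffix).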
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
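      // splat variant: load 4 consecutive A elements per row, splat each lane via
      // a shuffle, and accumulate against the matching B vectors, unrolling K by 4.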
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-relu-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
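      // __builtin_wasm_relaxed_madd_f32x4 is the relaxed-SIMD multiply-add; whether
      // the intermediate rounding is fused is implementation-defined under the
      // relaxed-SIMD proposal.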
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
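    // ReLU: an i32 max against zero clears any lane whose sign bit is set
    // (negative floats, including -0.0) and leaves non-negative lanes untouched,
    // which is equivalent to max(x, 0.0f) here.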
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-relu-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
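    // Loop over the indirection buffer: each pass consumes 4 A row pointers;
    // entries equal to `zero` point at the shared zero buffer (padding) and are
    // deliberately left unadjusted by a_offset.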
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-relu-wasmsimd-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
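  // When mr < 4, the unused row pointers alias a lower valid row; their stores
  // hit memory that the valid row's store, issued later, overwrites anyway.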
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-relu-wasmsimd-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
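    // The first 8 floats of the packed weights are the bias for this column
    // group; they seed the row-0 accumulators, which are then copied to rows 1-3.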
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
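    // Plain (linear) variant: no min/max or ReLU clamp; the raw accumulators are
    // stored directly.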
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
// XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
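      // K remainder (1-3 elements): broadcast one A element per row per iteration until k is exhausted.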
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
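    // Full-width path: store the complete 4x8 tile, then step the indirection pointer list back by ks
    // so the same A rows are reused for the next 8 output columns.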
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
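      // Partial tile: store 4, 2, then 1 remaining column(s), shifting the surviving lanes down after each step.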
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,224 | 38.176245 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-wasmsimd-loadsplat.c |

// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
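      // loadsplat strategy: every iteration broadcasts a single A element per row and multiplies it
      // against two packed B vectors.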
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,450 | 29.116022 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
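      // splat variant for baseline WASM SIMD: multiply and add are issued as separate instructions
      // (no fused multiply-add in this kernel).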
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,093 | 37.67433 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
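      // s4 ("shuffle") strategy: after each of the 4 sub-steps the A vectors are rotated by one lane
      // so the next packed block of B lines up with the next A element.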
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
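      // K remainder: weights beyond kc are packed as zeros, so A lanes whose corresponding B lane is zero
      // are masked off before the multiply to keep out-of-range A values out of the accumulators.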
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
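    // Clamp the accumulated tile to the [min, max] output range from params.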
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 14,670 | 43.323263 | 128 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
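  // Load the output clamping bounds once, before the main tile loop.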
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
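        // Remainder loads may read past the end of each A row (the kernel is declared XNN_OOB_READS);
        // the masking below zeroes any lane whose packed weight is zero, so the extra data never reaches the accumulators.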
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
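    // Clamp with relaxed min/max; relaxed SIMD leaves NaN and signed-zero handling implementation-defined.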
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,594 | 45.972892 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
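      // Non-FMA relaxed variant: the dot products still use separate multiply and add;
      // only the final clamping uses relaxed min/max.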
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,398 | 45.38253 | 129 | c |

XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,107 | 44.506024 | 129 | c |

XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_4x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,123 | 44.554217 | 129 | c |

XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-relu-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,817 | 45.018634 | 132 | c |

XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-relu-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_4x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,614 | 44.388199 | 129 | c |

XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
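      // K remainder (1..3 columns): the A loads may read past the end of the row (permitted by XNN_OOB_READS), so lanes whose packed weight is zero are cleared from A with andnot to keep garbage (possibly NaN/Inf) values out of the accumulators.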
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
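    // Store the 4x8 output tile; with fewer than 8 columns left, write progressively smaller chunks of 4, 2 and 1 floats.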
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,347 | 44.840256 | 132 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-4x8s4-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_4x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
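      // Same s4 scheme as the relaxed-SIMD variant, but built from separate wasm_f32x4_mul/wasm_f32x4_add since baseline WASM SIMD has no fused multiply-add.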
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
p -= 4 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,144 | 44.191693 | 129 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x16-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
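  // One output row pointer per row of the tile; when mr is smaller than 5 the unused pointers alias the previous row, so their stores simply repeat already-written values.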
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
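      // Inner K loop: broadcast one A element per row and multiply it against 16 packed weights (two 256-bit vectors); plain AVX has no FMA, so multiply and add are separate.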
do {
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
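    // Clamp the accumulators to the [min, max] range supplied in params.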
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
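      // Narrow to the low 128-bit halves for the 4/2/1-column tail stores.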
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,810 | 33.826087 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x16-minmax-avx512f-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
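        // One K step: load 16 packed weights and broadcast a single A element per row into a zmm register for the FMA.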
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 5,792 | 33.076471 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x16-minmax-fma3-broadcast-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/prefetch.h>
void xnn_f32_igemm_minmax_ukernel_5x16__fma3_broadcast_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
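        // Prefetch the packed weights 224 floats (14 iterations, 896 bytes) ahead to hide memory latency.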
xnn_prefetch_to_l1(w + 224);
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,753 | 33.329412 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x16-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,681 | 33.316206 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
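    // 5x8 tile: a single 256-bit accumulator per row, initialized from the packed bias.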
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,711 | 31.269231 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
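      // Inner K loop: broadcast one float from each of the 5 A rows and fuse-multiply-accumulate it with 8 columns of B per iteration.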
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
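    // Clamp the 5x8 accumulator tile to the [min, max] output range.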
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,647 | 30.961538 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
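      // Main K loop, unrolled by 4: load 4 A elements per row, broadcast each lane with SHUFPS, and accumulate against the corresponding pair of B vectors.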
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
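      // Remainder loop for kc % 4: process the leftover K elements one at a time with scalar broadcasts.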
if XNN_UNLIKELY(k != 0) {
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,833 | 38.247706 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-sse-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
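      // K is processed one element at a time: _mm_load1_ps broadcasts a single A value, which is multiplied against two 4-wide B vectors (8 output columns).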
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,164 | 30.28821 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-sse2-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
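        // Lanes 0-2 are broadcast with integer shuffles (PSHUFD) on the bit-cast vectors; lane 3 below uses SHUFPS directly.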
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 13,344 | 39.810398 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
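    // Clamp with relaxed f32x4 min/max; results for NaN inputs may differ between engines.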
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,001 | 33.943231 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
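        // K loop unrolled by 4: each lane of the loaded A vectors is splatted via a 32-bit shuffle and accumulated with relaxed fused multiply-add.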
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,776 | 41.653251 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmrelaxedsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
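      // Non-fused variant: multiply and add are issued as separate wasm_f32x4 operations; only the final min/max clamp uses relaxed instructions.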
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
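    // Clamp with relaxed min/max. The relaxed-SIMD builtins may give
    // implementation-defined results when an input is NaN or when -0.0 and +0.0 are
    // compared, presumably accepted here in exchange for cheaper lowering.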
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
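      // Splat variant: the main loop consumes 4 floats of K per iteration; each lane
      // of the loaded A vectors is broadcast with a shuffle and multiplied against its
      // own pair of packed-weight vectors.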
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
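      // Remainder when kc is not a multiple of 4: fall back to one float of K per
      // iteration, as in the loadsplat variant.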
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
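    // The "arm" flavor clamps with the fully specified wasm_f32x4_max/min, whose NaN
    // and signed-zero behavior maps well onto AArch64 fmax/fmin; the suffix reflects
    // the target the variant is tuned for, not a difference in the clamped range.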
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
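      // Fewer than 8 output channels remain: store the surviving columns in chunks of
      // 4, 2 and 1, shifting the upper lanes down after each partial store.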
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
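    // The "x86" flavor clamps with wasm_f32x4_pmax/pmin ("pseudo" min/max, a plain
    // b < a ? b : a select). Unlike the fully specified wasm_f32x4_max/min, these map
    // one-to-one onto x86 maxps/minps, which is what this variant is tuned for.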
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
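    // The packed weight stream starts each group of 8 output channels with the 8 bias
    // values; they seed the row-0 accumulators and are copied into the other 4 rows.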
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-relu-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
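      // This variant accumulates with the relaxed multiply-add builtin; whether the
      // intermediate product is rounded (true FMA vs. separate mul+add) is left to the
      // WebAssembly engine, so low-order bits may differ across platforms.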
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
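    // ReLU is applied with a signed integer max against zero: any float whose sign bit
    // is set (including -0.0) compares below 0 as an int32 and is replaced by +0.0f,
    // while non-negative floats keep their bit pattern.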
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,052 | 31.502304 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-relu-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_5x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
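      // Main splat loop: consume 4 K elements per iteration. Each lane of the 4-wide
      // A load is broadcast with a shuffle and multiplied against its own 8-wide slice of B.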
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
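      // Remainder loop: the kc % 4 leftover K elements are handled one at a time
      // via 32-bit splat loads.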
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,827 | 40.247588 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-relu-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
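      // Baseline wasmsimd variant: accumulate with a separate multiply and add
      // (exact, unfused) instead of the relaxed fused multiply-add used by the FMA kernels.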
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,011 | 31.313364 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-relu-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
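  // Rows past mr alias the previous row's output pointer; results are stored from the
  // highest row down, so an aliased store is overwritten by the valid row written after it.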
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,666 | 39.729904 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
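      // a[] is an indirection buffer of per-row input pointers; an entry equal to `zero`
      // is assumed to reference a shared zero vector and is not shifted by a_offset.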
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,478 | 30.451456 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
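    // Linear (no-activation) variant: the accumulators are stored directly,
    // with no min/max or ReLU clamp applied.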
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,253 | 39.846667 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
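      // Rewind the indirection buffer by ks bytes so the same A pointers are reused
      // for the next 8-column tile of this row block.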
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,437 | 30.252427 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
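      // Packed weight layout: 8 bias floats (already loaded into the accumulators above),
      // then 8 weights per K element; the 4x-unrolled loop below reads 32 floats per pass.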
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
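    // Write the full 8-column tile when at least 8 columns remain; otherwise emit
    // 4-, 2- and 1-column tails from the accumulators.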
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,092 | 39.31 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/igemm.h>
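// This kernel computes a 5x8 output tile of an indirect GEMM (igemm): MR = 5 rows
// taken through the indirection buffer `a`, NR = 8 output columns. The "s4" suffix
// follows the sse-shuffle template named above: each group of 4 k-elements is handled
// in four sub-steps that rotate the input lanes instead of broadcasting them.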
void xnn_f32_igemm_minmax_ukernel_5x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
__m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
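        // Shuffle scheme: multiply the current lane order against one packed weight
        // block, then rotate every input vector by one lane; after four passes each
        // input element has been paired with its weight column.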
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
__m128 va4 = _mm_loadu_ps(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
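        // k < 4 here, so the loads above read past the valid elements (the kernel is
        // declared XNN_OOB_READS). The andnot/cmpeq masking below zeroes the input
        // lanes wherever the packed weight is 0.0f, which is expected to cover the
        // zero padding beyond kc, so out-of-bounds values cannot reach the accumulators.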
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
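    // Clamp the accumulated tile to the [min, max] output range from params.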
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 17,727 | 45.408377 | 128 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
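// Same 5x8 s4 tile as the plain wasmsimd kernels, but the inner products use the
// relaxed-SIMD fused multiply-add builtin; relaxed semantics allow the engine to emit
// either a fused or an unfused multiply-add, so low-order result bits may differ
// between hosts.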
void xnn_f32_igemm_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,842 | 48.198433 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
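// In this variant only the output clamping uses relaxed-SIMD min/max; the inner
// products keep the exact wasm_f32x4_mul/wasm_f32x4_add sequence of the baseline
// wasmsimd kernel.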
void xnn_f32_igemm_minmax_ukernel_5x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,598 | 47.561358 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
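// The "-arm" suffix is assumed to select the clamping flavor: kernels in this family
// typically clamp with wasm_f32x4_min/wasm_f32x4_max (which lower well on ARM
// targets), while the matching "-x86" kernels use the pmin/pmax sequence.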
void xnn_f32_igemm_minmax_ukernel_5x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
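      // K remainder (1-3 floats): a is read full-width past its end (XNN_OOB_READS),
      // so lanes whose packed weights are zero are masked out of va first, keeping
      // any Inf/NaN garbage from reaching the accumulators.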
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
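      // Fewer than 8 columns remain: store 4, 2, then 1 column, shifting the
      // surviving lanes down after each partial store.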
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,235 | 46.613577 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
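    // This x86-tuned variant clamps with pseudo-min/max (pmin/pmax), which map more
    // directly onto x86 MINPS/MAXPS; the arm-tuned variant uses the NEON-friendly min/max.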
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,255 | 46.665796 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-relu-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_relu_ukernel_5x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
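      // Relaxed-SIMD multiply-add: the engine may emit a fused FMA or a separate
      // multiply and add, so the low bits of the result can differ between hosts.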
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
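    // ReLU via integer max: non-negative floats have non-negative int32 bit patterns
    // and pass through, while negative floats (sign bit set) are replaced by +0.0f.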
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,893 | 47.231806 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-relu-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_relu_ukernel_5x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
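    // Fuse the ReLU activation: wasm_i32x4_max against zero works on IEEE-754 floats,
    // since non-negative floats order like signed integers and any negative float has
    // its sign bit set and therefore compares below zero as an int32.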
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
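      // Fewer than 8 output columns remain: store 4, 2, and then 1 lane as needed,
      // shifting the surviving lanes down after each partial store.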
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,642 | 46.555256 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
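  // One output row pointer per MR row; rows beyond mr alias the previous row, so a
  // single code path handles any mr in [1, 5].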
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
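      // Main loop over k in steps of 4 ("s4" layout): the activation vector is applied
      // to one packed weight block, rotated left by one lane, and reused against the
      // next block, four times per iteration. The relaxed madd may lower to a fused
      // multiply-add on targets that support it.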
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
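        // Remainder of k (1-3 elements): these loads may run past the valid data
        // (the kernel is marked XNN_OOB_READS), so activation lanes are zeroed wherever
        // the packed weight is zero, keeping out-of-range values out of the accumulators.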
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,319 | 47.111111 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-5x8s4-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_ukernel_5x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t p = ks;
do {
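      // Dereference the indirection buffer: every entry except the shared `zero` row
      // is shifted by a_offset.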
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
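      // Baseline WASM SIMD has no fused multiply-add, so each accumulator update is an
      // explicit multiply followed by an add.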
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
p -= 5 * sizeof(void*);
} while (p != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,068 | 46.413889 | 129 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
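      // Each k step broadcasts one scalar per A row across a 16-lane vector and FMAs it
      // with a single 16-float block of packed weights.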
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
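    // Fused output clamping to [min, max] taken from the kernel parameters.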
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 6,607 | 34.148936 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x2-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x2__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x2_t vacc0x01 = vld1_f32(w); w += 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
float32x2_t vacc4x01 = vacc0x01;
float32x2_t vacc5x01 = vacc0x01;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
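      // Two floats per row are loaded each iteration and combined with 2-wide weight
      // vectors via lane-indexed fused multiply-adds.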
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x2_t vb01c0 = vld1_f32(w); w += 2;
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc4x01 = vfma_lane_f32(vacc4x01, vb01c0, va4, 0);
vacc5x01 = vfma_lane_f32(vacc5x01, vb01c0, va5, 0);
#else
const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
const float32x2_t va4c0 = vdup_lane_f32(va4, 0);
const float32x2_t va5c0 = vdup_lane_f32(va5, 0);
vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
vacc4x01 = vfma_f32(vacc4x01, va4c0, vb01c0);
vacc5x01 = vfma_f32(vacc5x01, va5c0, vb01c0);
#endif
const float32x2_t vb01c1 = vld1_f32(w); w += 2;
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
vacc4x01 = vfma_lane_f32(vacc4x01, vb01c1, va4, 1);
vacc5x01 = vfma_lane_f32(vacc5x01, vb01c1, va5, 1);
#else
const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
const float32x2_t va4c1 = vdup_lane_f32(va4, 1);
const float32x2_t va5c1 = vdup_lane_f32(va5, 1);
vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
vacc4x01 = vfma_f32(vacc4x01, va4c1, vb01c1);
vacc5x01 = vfma_f32(vacc5x01, va5c1, vb01c1);
#endif
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0);
const float32x2_t va1 = vld1_dup_f32(a1);
const float32x2_t va2 = vld1_dup_f32(a2);
const float32x2_t va3 = vld1_dup_f32(a3);
const float32x2_t va4 = vld1_dup_f32(a4);
const float32x2_t va5 = vld1_dup_f32(a5);
const float32x2_t vb01 = vld1_f32(w); w += 2;
vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
vacc4x01 = vfma_f32(vacc4x01, va4, vb01);
vacc5x01 = vfma_f32(vacc5x01, va5, vb01);
}
p -= 6 * sizeof(void*);
} while (p != 0);
const float32x2_t vmax = vld1_dup_f32(¶ms->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
vacc4x01 = vmin_f32(vacc4x01, vmax);
vacc5x01 = vmin_f32(vacc5x01, vmax);
const float32x2_t vmin = vld1_dup_f32(¶ms->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
vacc4x01 = vmax_f32(vacc4x01, vmin);
vacc5x01 = vmax_f32(vacc5x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c5, vacc5x01);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1_f32(c4, vacc4x01);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
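      // With NR = 2 the only possible remainder is a single column, stored from lane 0.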
assert(nc == 1);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 7,912 | 33.554585 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x2-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x2_t vacc0x01 = vld1_f32(w); w += 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
float32x2_t vacc4x01 = vacc0x01;
float32x2_t vacc5x01 = vacc0x01;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
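      // This variant uses vmla_lane_f32 (a multiply-accumulate that need not be fused)
      // rather than the vfma intrinsics used by the FMA-specialized kernels.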
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x2_t vb01c0 = vld1_f32(w); w += 2;
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc4x01 = vmla_lane_f32(vacc4x01, vb01c0, va4, 0);
vacc5x01 = vmla_lane_f32(vacc5x01, vb01c0, va5, 0);
const float32x2_t vb01c1 = vld1_f32(w); w += 2;
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
vacc4x01 = vmla_lane_f32(vacc4x01, vb01c1, va4, 1);
vacc5x01 = vmla_lane_f32(vacc5x01, vb01c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0);
const float32x2_t va1 = vld1_dup_f32(a1);
const float32x2_t va2 = vld1_dup_f32(a2);
const float32x2_t va3 = vld1_dup_f32(a3);
const float32x2_t va4 = vld1_dup_f32(a4);
const float32x2_t va5 = vld1_dup_f32(a5);
const float32x2_t vb01 = vld1_f32(w); w += 2;
vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
vacc4x01 = vmla_f32(vacc4x01, va4, vb01);
vacc5x01 = vmla_f32(vacc5x01, va5, vb01);
}
p -= 6 * sizeof(void*);
} while (p != 0);
const float32x2_t vmax = vld1_dup_f32(¶ms->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
vacc4x01 = vmin_f32(vacc4x01, vmax);
vacc5x01 = vmin_f32(vacc5x01, vmax);
const float32x2_t vmin = vld1_dup_f32(¶ms->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
vacc4x01 = vmax_f32(vacc4x01, vmin);
vacc5x01 = vmax_f32(vacc5x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c5, vacc5x01);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1_f32(c4, vacc4x01);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,385 | 31.090452 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
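      // ld128 layout: 4 activations per row are loaded at once, and each lane is applied
      // against its own pair of 4-wide weight vectors with lane-indexed FMAs.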
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,067 | 39.8375 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t va4 = vld1q_dup_f32(a4);
const float32x4_t va5 = vld1q_dup_f32(a5);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,349 | 35.702128 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,745 | 32.244635 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w += 8;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
k -= sizeof(float);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,668 | 31.914163 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neon-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c2, vb0123c2);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c3, vb0123c3);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c3, vb4567c3);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,811 | 39.151163 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t va4 = vld1q_dup_f32(a4);
const float32x4_t va5 = vld1q_dup_f32(a5);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,937 | 36.204082 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neon-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neon_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
}
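      // Remainder: multiply-accumulate the last 1-3 K elements one scalar at a time.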
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
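    // Clamp the accumulators to the [min, max] range supplied in params.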
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,056 | 39.803125 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
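      // Main loop: consume K in blocks of 2 floats (64-bit A loads), issuing one
      // lane multiply-accumulate per element against two 4-wide B vectors.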
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
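      // Remainder: the 2-element main loop leaves at most one K element to process.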
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0);
const float32x4_t va1 = vld1q_dup_f32(a1);
const float32x4_t va2 = vld1q_dup_f32(a2);
const float32x4_t va3 = vld1q_dup_f32(a3);
const float32x4_t va4 = vld1q_dup_f32(a4);
const float32x4_t va5 = vld1q_dup_f32(a5);
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
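    // Store a full 6x8 output tile when nc >= 8; otherwise fall through to the partial-tile path.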
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,338 | 35.663121 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-igemm/gen/f32-igemm-6x8-minmax-neonfma-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-igemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
void xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const float** restrict a,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
float* c0 = c;
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t p = ks;
do {
const float* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const float*) ((uintptr_t) a0 + a_offset);
}
const float* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const float*) ((uintptr_t) a1 + a_offset);
}
const float* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const float*) ((uintptr_t) a2 + a_offset);
}
const float* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const float*) ((uintptr_t) a3 + a_offset);
}
const float* restrict a4 = a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const float*) ((uintptr_t) a4 + a_offset);
}
const float* restrict a5 = a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const float*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
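      // Main loop: consume K in blocks of 4 floats; each A lane is broadcast with
      // vdupq_lane_f32 and fused-multiply-accumulated into the accumulators.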
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c2, vb0123c2);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c3, vb0123c3);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c3, vb4567c3);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);
}
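      // Remainder: process the last 1-3 K elements one scalar at a time.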
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
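    // Store a full 6x8 output tile when nc >= 8; otherwise write the tail in 4/2/1-column steps.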
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a = (const float**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,814 | 39.159884 | 75 |
c
|