repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8s4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
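// 3x8 f32 GEMMINC microkernel (SSE, s4 shuffle variant): computes a 3-row by
// 8-column tile of C = A * B, starting from externally supplied accumulators
// (`acc`) rather than a bias, and clamps the result to [min, max].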
void xnn_f32_gemminc_minmax_ukernel_3x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
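// Set up per-row input/output pointers; when mr < 3 the unused rows alias the
// previous row so every load and store stays in bounds.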
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
acc += 24;
size_t k = kc;
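// Main loop: each iteration consumes 4 K elements. After every
// multiply-accumulate pass the A registers are rotated one lane
// (_MM_SHUFFLE(0, 3, 2, 1)) so each lane lines up with the matching group of
// the packed weights.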
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
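// Remainder path for kc % 4 != 0: up to 4 A values are loaded (the tail
// over-read is permitted by XNN_OOB_READS) and lanes whose packed weights are
// zero-padded are cleared via andnot(cmpeq(0, b), a), so they contribute
// nothing to the accumulators.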
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
w += 32;
}
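// Clamp the accumulators to the [min, max] output range from params.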
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
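// Store the full 3x8 tile when at least 8 columns remain; otherwise write the
// 4-, 2- and 1-column tails and finish.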
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
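For reference, the arithmetic that every gemminc variant in this table implements can be summarized by a plain scalar loop: each output tile starts from the supplied accumulators, adds the A*B products, and clamps to [min, max]. The sketch below is illustrative only and is not part of the dataset; the function name, the unpacked row-major `b` layout, and the element-count parameters are assumptions made for readability (the real kernels read `w` and `acc` in packed, kernel-specific orders and take `kc` in bytes).

#include <stddef.h>

// Illustrative scalar reference (assumed names, row-major unpacked B):
// c[m][n] = clamp(acc[m][n] + sum_k a[m][k] * b[k][n], min, max)
static void gemminc_minmax_reference(
    size_t mr, size_t nc, size_t kc_elements,
    const float* a, size_t a_stride_elements,
    const float* b,            // kc_elements x nc, row-major (unpacked)
    const float* acc,          // mr x nc initial accumulators
    float* c, size_t c_stride_elements,
    float min, float max)
{
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nc; n++) {
      float v = acc[m * nc + n];
      for (size_t k = 0; k < kc_elements; k++) {
        v += a[m * a_stride_elements + k] * b[k * nc + n];
      }
      // Same clamping range the SIMD kernels apply before storing.
      if (v > max) { v = max; }
      if (v < min) { v = min; }
      c[m * c_stride_elements + n] = v;
    }
  }
}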
| 11,029 | 42.085938 | 126 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
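// Same 3x8s4 GEMMINC microkernel expressed with WebAssembly Relaxed SIMD: the
// multiply-accumulates use the relaxed fused multiply-add builtin, which may
// or may not fuse depending on the host.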
void xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
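// The clamping bounds are broadcast once per call; the 64-bit splat load
// fills all four lanes from the pair of floats stored in params->wasmsimd.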
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
acc += 24;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
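// Remainder path: masks off A lanes whose packed weights are zero-padded,
// mirroring the masking used in the SSE variant above.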
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
w += 32;
}
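// Clamp with relaxed min/max: faster lowering at the cost of
// implementation-defined NaN and signed-zero behavior.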
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,764 | 43.73384 | 130 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8s4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
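// Relaxed-SIMD variant without FMA: products are formed with separate
// multiply and add, while clamping still uses the relaxed min/max builtins.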
void xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
acc += 24;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,616 | 43.171103 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8s4-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
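// Portable WebAssembly SIMD variant tuned for ARM-style lowerings: identical
// arithmetic, but clamping uses the fully specified wasm_f32x4_min/max.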
void xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
acc += 24;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,397 | 42.338403 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-3x8s4-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
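// x86-tuned WebAssembly SIMD variant: clamping uses pseudo-min/max
// (wasm_f32x4_pmin/pmax), which lower to single x86 min/max instructions.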
void xnn_f32_gemminc_minmax_ukernel_3x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
acc += 24;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,409 | 42.38403 | 127 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x16-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
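// 4x16 f32 GEMMINC microkernel for AVX: a 4-row by 16-column tile held in
// eight 256-bit accumulators, initialized from `acc` and clamped on output.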
void xnn_f32_gemminc_minmax_ukernel_4x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
__m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
acc += 64;
size_t k = kc;
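// One K element per iteration: each row's A value is broadcast to all eight
// lanes and multiplied against two packed 8-wide column groups of B.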
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,932 | 33.152709 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemminc_minmax_ukernel_4x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
__m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
__m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
__m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
acc += 64;
size_t k = kc;
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
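        // (UINT32_C(1) << nc) - 1 sets only the nc lowest mask bits, so the masked
        // stores below write just the remaining output columns.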
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 4,486 | 32.485075 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x16-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
__m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
acc += 64;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
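      // Fewer than 16 columns remain: write them out in 8-, 4-, 2- and 1-wide pieces.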
if (nc & 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,829 | 32.64532 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemminc_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = acc[0];
float vacc01 = acc[1];
float vacc02 = acc[2];
float vacc03 = acc[3];
float vacc10 = acc[4];
float vacc11 = acc[5];
float vacc12 = acc[6];
float vacc13 = acc[7];
float vacc20 = acc[8];
float vacc21 = acc[9];
float vacc22 = acc[10];
float vacc23 = acc[11];
float vacc30 = acc[12];
float vacc31 = acc[13];
float vacc32 = acc[14];
float vacc33 = acc[15];
acc += 16;
size_t k = kc;
do {
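      // Each K step is a rank-1 update: one column of A (4 values) times one row of
      // packed W (4 values) accumulated into the 4x4 tile.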
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc22 = math_max_f32(vacc22, vmin);
vacc23 = math_max_f32(vacc23, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc32 = math_max_f32(vacc32, vmin);
vacc33 = math_max_f32(vacc33, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc22 = math_min_f32(vacc22, vmax);
vacc23 = math_min_f32(vacc23, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
vacc32 = math_min_f32(vacc32, vmax);
vacc33 = math_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,832 | 27.315534 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x4-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemminc_minmax_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = acc[0];
float vacc01 = acc[1];
float vacc02 = acc[2];
float vacc03 = acc[3];
float vacc10 = acc[4];
float vacc11 = acc[5];
float vacc12 = acc[6];
float vacc13 = acc[7];
float vacc20 = acc[8];
float vacc21 = acc[9];
float vacc22 = acc[10];
float vacc23 = acc[11];
float vacc30 = acc[12];
float vacc31 = acc[13];
float vacc32 = acc[14];
float vacc33 = acc[15];
acc += 16;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
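    // Clamp with the WebAssembly f32 min/max builtins instead of the generic
    // math_min_f32/math_max_f32 helpers used by the scalar kernel above.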
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
vacc33 = __builtin_wasm_max_f32(vacc33, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
vacc33 = __builtin_wasm_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 6,150 | 28.859223 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
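    // Main loop consumes K in blocks of four: one 128-bit load per A row feeds
    // lane-indexed FMAs against four consecutive groups of packed weights (c0..c3).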
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
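      // Handle the 1-3 leftover K values by broadcasting single A elements.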
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,941 | 38.048035 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,104 | 34.883838 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
acc += 32;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
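      // Rewind each A pointer by kc bytes so the same rows are reused for the
      // next block of 8 output columns.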
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,211 | 30.39759 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
acc += 32;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,160 | 30.090361 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neon-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
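    // The dup variant broadcasts each A lane with vdupq_lane_f32 and accumulates
    // with vmlaq_f32 (plain multiply-add, unlike the fused vfmaq used by the
    // aarch64 kernels above).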
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,401 | 37.37551 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
}
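    // Remainder: kc is odd here, so one trailing A value per row is handled with a
    // broadcast load (vld1q_dup_f32) and a final multiply-accumulate.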
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,444 | 35.140777 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neon-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neon_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
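    // Main loop over K in steps of 4: 128-bit A loads; vmlaq_lane_f32 multiplies B by a
    // selected lane of the low/high half of each A vector, so no explicit broadcast is needed.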
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
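    // Remainder: up to 3 leftover K values, processed one at a time with broadcast loads.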
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,930 | 38 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
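    // Main loop over K in steps of 2: 64-bit A loads; vmlaq_lane_f32 selects lane 0 and
    // lane 1 of each A pair directly instead of broadcasting first.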
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,093 | 34.828283 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neonfma-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
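    // Main loop over K in steps of 4: 128-bit A loads; each lane is broadcast with
    // vdupq_lane_f32 and accumulated with fused multiply-add (vfmaq_f32).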
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,404 | 37.387755 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
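    // Main loop over K in steps of 2: 64-bit A loads, vdupq_lane_f32 broadcasts, and
    // fused multiply-add via vfmaq_f32.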
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,447 | 35.15534 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
acc += 32;
size_t k = kc;
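    // Main loop over K in steps of 4: unaligned A loads; _mm_shuffle_ps splats each element
    // and the products are accumulated with separate _mm_mul_ps/_mm_add_ps (SSE has no FMA).
    // Leftover K values fall through to the scalar-broadcast loop below.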
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
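      // Partial-width output: store 4 columns with _mm_storeu_ps, 2 with _mm_storel_pi,
      // and the last one with _mm_store_ss, shifting the upper accumulator halves down
      // as columns are written.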
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,120 | 36.906367 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-sse-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
acc += 32;
size_t k = kc;
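    // Single loop over K: _mm_load1_ps broadcasts one A value per row per iteration,
    // so no vectorized-K main loop or remainder path is needed.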
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,609 | 29.655738 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-sse2-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
acc += 32;
size_t k = kc;
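    // Same structure as the SSE dup kernel, but the lane splats for c0-c2 go through the
    // SSE2 integer shuffle (_mm_shuffle_epi32 on bit-cast registers); the c3 splat still
    // uses _mm_shuffle_ps.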
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
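    // Load the initial accumulators from the acc buffer of partial sums.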
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
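    // Loadsplat variant: each iteration broadcasts one A element per row and issues relaxed fused multiply-adds.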
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmrelaxedsimd-fma-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
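    // Main loop: load 4 K elements per row and splat each lane against two 4-wide blocks of B.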
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
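    // Remainder of K (1..3 floats): broadcast a single A element per row each iteration.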
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmrelaxedsimd-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
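    // Clamp with relaxed min/max; NaN handling is implementation-defined for the relaxed forms.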
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmrelaxedsimd-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
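      // Partial tile: write 4 columns with a full store, then 2 and 1 column(s) with lane stores.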
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
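    // The "arm" variant clamps with wasm_f32x4_max/min, which map naturally to ARM NEON min/max instructions.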
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
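    // The "x86" variant clamps with wasm_f32x4_pmax/pmin, which lower to single MINPS/MAXPS instructions on x86.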
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
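    // Remainder: handle the last k % 4 floats one at a time, broadcasting each
    // A element across a vector and accumulating it against one row of 8
    // packed weights.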
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
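    // Clamp to [min, max] using the pseudo-min/max operations, whose
    // operand-ordering semantics are chosen to map directly onto x86
    // minps/maxps.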
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
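    // Store the 4x8 output tile. The full-width path writes all 8 columns in
    // row order c3..c0, steps each output pointer by cn_stride, and rewinds
    // the A pointers by kc for the next column block; the tail path writes
    // 4, 2, and 1 columns as needed.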
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}

// File: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8s4-minmax-sse.c

// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
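  // Loop over 8-column tiles of the output. This is the GEMMINC variant: the
  // accumulators are seeded from the caller-provided partial sums in `acc`
  // rather than from packed bias values.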
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
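      // Rotate each A register by one lane (element 1 moves into lane 0) so the
      // next group of packed weights is multiplied by the next A element (the
      // "s4" shuffled-weight scheme).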
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
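    // Remainder (k < 4 elements): a full vector of A is loaded, which may read
    // past the end of each row (the kernel is declared XNN_OOB_READS). Lanes
    // whose packed weight is zero are cleared from A with cmpeq/andnot, so the
    // out-of-range values never reach the accumulators.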
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
w += 32;
}
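    // Clamp the accumulators to the requested output range: min against
    // params->sse.max, then max against params->sse.min.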
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}

// File: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8s4-minmax-wasmrelaxedsimd-fma.c

// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
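  // Relaxed-SIMD FMA variant: __builtin_wasm_relaxed_madd_f32x4 lets the
  // engine emit either a fused or an unfused multiply-add, so the low-order
  // bits of the result may differ between implementations.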
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
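    // Clamp with the relaxed min/max intrinsics, whose results are
    // implementation-defined only when an input is NaN or the operands are
    // zeros of opposite sign.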
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}

// File: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8s4-minmax-wasmrelaxedsimd.c

// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
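  // Non-FMA relaxed-SIMD variant: accumulation uses plain (non-fused)
  // multiply-then-add; relaxed SIMD is used only for the final min/max
  // clamping.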
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}

// File: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8s4-minmax-wasmsimd-arm.c

// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
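  // "arm" variant: clamping uses wasm_f32x4_min/max, whose fully specified
  // NaN-propagating semantics match ARM's native vector min/max instructions.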
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}

// File: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-4x8s4-minmax-wasmsimd-x86.c

// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_4x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
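// GEMMINC variant: the initial accumulators are read from the `acc` buffer
// (partial sums carried in from a previous pass over K) instead of the bias
// values that the plain GEMM kernels take from the packed weights `w`.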
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
acc += 32;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
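// K remainder (1-3 floats left per row): the 128-bit loads below may read past
// the valid elements, so each A vector is masked (andnot) wherever the
// zero-padded weights equal 0.0f; this keeps garbage lanes (possibly NaN/Inf)
// out of the accumulators.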
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
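// Clamp to the requested [min, max] range. This x86-flavored variant uses
// pmin/pmax, which are intended to lower cheaply to SSE minps/maxps when
// WAsm SIMD is JITed on x86.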
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
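// Fewer than 8 output columns remain: store 4, then 2, then 1 column,
// shifting the surviving lanes down after each partial store.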
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,473 | 45.391026 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x16-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
__m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
__m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
acc += 80;
size_t k = kc;
do {
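// Each iteration broadcasts one scalar of A per row and multiplies it against
// a 16-wide slice of packed weights; plain AVX has no FMA, so the update is a
// separate multiply followed by an add.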
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,332 | 34.611111 | 85 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x16-minmax-avx512f-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
__m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
__m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
__m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
__m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
acc += 80;
size_t k = kc;
do {
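// One 512-bit register covers all 16 output columns, so a single fused
// multiply-add (_mm512_fmadd_ps) per row updates the whole tile width.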
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 5,251 | 34.013333 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x16-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
__m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
__m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
acc += 80;
size_t k = kc;
do {
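// Same broadcast scheme as the AVX kernel, but FMA3 fuses the multiply and
// accumulate into a single _mm256_fmadd_ps per register.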
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,203 | 34.059829 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
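// Main loop consumes K in steps of 2: two A elements per row sit in a 64-bit
// register and vfmaq_lane_f32 fuses the per-lane broadcast multiply with the
// accumulation.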
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
}
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,487 | 36.22807 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
acc += 40;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,203 | 31.825397 | 85 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
acc += 40;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,139 | 31.486772 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
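// Same ld64 structure as the aarch64/neonfma kernel, but vmlaq_lane_f32 is
// the non-fused multiply-accumulate (typically VMLA.F32 rather than VFMA.F32).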
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
}
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,476 | 36.179825 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
__m128 vacc4x0123 = _mm_load_ps(acc + 32);
__m128 vacc4x4567 = _mm_load_ps(acc + 36);
acc += 40;
size_t k = kc;
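    // Main loop: 4 K elements per iteration; each A lane is broadcast with _mm_shuffle_ps before the multiply-add.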
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,195 | 38.597403 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-sse-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
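  // Informal sketch of the calling convention, inferred from the code below (not authoritative documentation):
  //   mr                      - number of live rows of A and C (1..5)
  //   nc                      - number of output columns still to produce
  //   kc                      - reduction length in bytes (multiple of sizeof(float))
  //   a, a_stride             - row-major A panel and its row stride in bytes
  //   w                       - packed B panel: 8 consecutive floats per K element
  //   c, cm_stride, cn_stride - output tile, its row stride and column-block stride in bytes
  //   acc                     - 40 floats (5 rows x 8 columns) of partial sums that seed the accumulators
  //   params                  - min/max bounds applied to the results before they are stored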
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
__m128 vacc4x0123 = _mm_load_ps(acc + 32);
__m128 vacc4x4567 = _mm_load_ps(acc + 36);
acc += 40;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
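    // In scalar terms, the loop above computes, for every K element k (illustrative sketch only):
    //
    //   for (size_t m = 0; m < 5; m++) {
    //     for (size_t n = 0; n < 8; n++) {
    //       acc[m][n] += a[m][k] * w[k * 8 + n];
    //     }
    //   }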
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,694 | 30.880952 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-sse2-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
__m128 vacc4x0123 = _mm_load_ps(acc + 32);
__m128 vacc4x4567 = _mm_load_ps(acc + 36);
acc += 40;
size_t k = kc;
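    // Same scheme as the SSE variant, except lanes 0-2 of A are broadcast with SSE2 integer shuffles
    // (_mm_shuffle_epi32 on bit-cast vectors); lane 3 still uses a float shuffle.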
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,706 | 40.256494 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
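    // One K element per iteration: splat each A value across a vector and accumulate with relaxed fused multiply-adds.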
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
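    // Clamp: relaxed max against vmin first, then relaxed min against vmax.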
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,555 | 34.980952 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
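    // Main loop: 4 K elements per pass; A lanes are broadcast with v32x4 shuffles and accumulated with relaxed FMA,
    // with a scalar remainder loop below for leftover elements.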
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,161 | 42.438944 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmrelaxedsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
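    // Non-FMA variant: products are formed with wasm_f32x4_mul and added to the accumulators separately.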
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,521 | 34.819048 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmrelaxedsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
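    // Main loop: consume 4 K elements per row. Each lane of the loaded A vectors is
    // splatted with a shuffle and multiplied by the two packed 4-wide B vectors (8 columns).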
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
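    // Remainder loop: handle the last 1-3 K elements one at a time via scalar broadcasts.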
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
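    // Clamp the accumulators to [min, max] using relaxed min/max, whose NaN/-0 behavior
    // is implementation-defined so the engine can pick the fastest native instruction.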
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
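    // Store the 5x8 tile; the else branch handles nc < 8 with 4/2/1-column tail stores.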
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 13,007 | avg_line_length: 41.930693 | max_line_length: 79 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmsimd-arm-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
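    // Loadsplat variant: one K element per row per iteration, broadcast straight from
    // memory and multiplied by the two packed 4-wide B vectors.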
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
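    // Clamp with wasm_f32x4_max/min, the NaN-propagating forms that map directly to ARM fmax/fmin.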
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
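    // Store the 5x8 tile or, when nc < 8, progressively smaller column tails.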
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 7,158 | avg_line_length: 33.090476 | max_line_length: 75 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmsimd-arm-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
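    // Main loop: consume 4 K elements per row, splatting each lane of A before multiplying
    // by the packed 8-wide B row.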
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
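    // Remainder loop: handle the last 1-3 K elements one at a time via scalar broadcasts.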
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
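    // Clamp the accumulators to [min, max] before storing.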
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 12,644 | avg_line_length: 40.732673 | max_line_length: 79 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmsimd-x86-loadsplat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
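    // Loadsplat variant: one K element per row per iteration, broadcast straight from memory.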
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
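    // Clamp with pmin/pmax, the pseudo-min/max forms the _x86_ variants use because they
    // lower to single SSE minps/maxps instructions.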
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 7,178 | avg_line_length: 33.185714 | max_line_length: 75 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8-minmax-wasmsimd-x86-splat.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
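    // Main loop: consume 4 K elements per row, splatting each lane of A with shuffles.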
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
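    // Remainder loop: handle the last 1-3 K elements one at a time via scalar broadcasts.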
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
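    // Clamp with the x86-friendly pseudo-min/max before storing.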
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 12,664 | avg_line_length: 40.79868 | max_line_length: 79 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8s4-minmax-sse.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(acc + 0);
__m128 vacc0x4567 = _mm_load_ps(acc + 4);
__m128 vacc1x0123 = _mm_load_ps(acc + 8);
__m128 vacc1x4567 = _mm_load_ps(acc + 12);
__m128 vacc2x0123 = _mm_load_ps(acc + 16);
__m128 vacc2x4567 = _mm_load_ps(acc + 20);
__m128 vacc3x0123 = _mm_load_ps(acc + 24);
__m128 vacc3x4567 = _mm_load_ps(acc + 28);
__m128 vacc4x0123 = _mm_load_ps(acc + 32);
__m128 vacc4x4567 = _mm_load_ps(acc + 36);
acc += 40;
size_t k = kc;
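    // s4 shuffle variant: A stays in registers and is rotated one lane (0, 3, 2, 1) between
    // the four sub-steps, so each K element lines up with the B values packed for that sub-step.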
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
__m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
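    // Remainder (1-3 K elements): A is over-read past the row end (permitted by XNN_OOB_READS);
    // lanes whose packed B entry is zero padding are masked off with andnot(cmpeq(0, b)) so the
    // extra lanes cannot pollute the accumulators.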
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
__m128 va4 = _mm_loadu_ps(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));
w += 32;
}
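    // Clamp the accumulators to the [min, max] output range loaded from params.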
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
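    // Store full 8-column rows when nc >= 8; otherwise write the tail 4, 2, and 1 columns at a time.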
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 16,994 | 47.008475 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
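      // Main loop: four k values per iteration. Between the four multiply-add groups the input vectors
      // are rotated by one lane (the s4 shuffle), so over the four sub-steps each input element is
      // combined with its own group of packed weights.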
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
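    // Remainder of 1-3 k values: input lanes are cleared wherever the packed weight is zero, so the
    // zero-padded weight positions cannot pollute the accumulators even if the loaded input there is garbage.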
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
w += 32;
}
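    // Clamp to the output range with relaxed SIMD min/max.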
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
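    // Write 8 columns per row, or the nc < 8 tail in chunks of 4, 2, and 1.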
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,123 | 49.204986 | 130 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8s4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
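    // Leftover 1-3 k values: the same four sub-steps run with the input masked to zero wherever the
    // weight is zero, so the unused lanes contribute nothing.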
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
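    // Apply the min/max output clamp.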
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,879 | 48.529086 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8s4-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
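    // kc remainder: the eq/andnot pair zeroes each input lane whose packed weight is zero before the multiply.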
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
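    // Full 8-column stores, then a 4/2/1-column tail path for nc < 8.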
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,516 | 47.523546 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-5x8s4-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
acc += 40;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
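    // Remainder of 1 to 3 K elements: wasm_v128_andnot clears each A lane whose
    // zero-padded weight lane is zero, so trailing out-of-range A values cannot
    // affect the accumulators.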
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
w += 32;
}
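    // Clamp the accumulators: pmax applies the lower bound, pmin the upper bound.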
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
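    // Store the 5x8 tile when at least 8 columns remain; otherwise fall through to
    // the partial-width stores below.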
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,536 | 47.578947 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
__m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
__m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
__m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
__m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
__m512 vacc5x0123456789ABCDEF = _mm512_load_ps(acc + 80);
acc += 96;
size_t k = kc;
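    // Each iteration broadcasts one A element per row and accumulates a 16-wide
    // column block of B with a single FMA per row.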
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
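    // Full 16-column rows are stored directly; the tail uses a masked store below.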
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 6,017 | 35.253012 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
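    // Main loop: load 4 K elements per row and issue lane-indexed FMAs against
    // four pairs of B vectors.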
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
}
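    // Scalar remainder loop for the last 1 to 3 K elements.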
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,474 | 41.003367 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
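    // Main loop: 2 K elements per iteration from 64-bit A loads.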
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,871 | 37.263566 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
acc += 48;
size_t k = kc;
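    // AVX has no FMA, so each K step is a separate multiply and add per row.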
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
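      // Fewer than 8 columns left: split each row into 128-bit halves and store
      // 4, 2, and 1 columns as indicated by nc.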
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,196 | 32.948113 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
acc += 48;
size_t k = kc;
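    // Same broadcast layout as the AVX kernel above, but with fused multiply-add.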
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,119 | 32.584906 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neon-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
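    // Main loop: each A lane is duplicated with vdupq_lane_f32 and accumulated
    // with vmlaq_f32.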
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c2, vb0123c2);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c3, vb0123c3);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c3, vb4567c3);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);
}
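    // Remainder loop: handle the leftover K elements (kc % 4) one at a time,
    // broadcasting each A element across a full vector.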
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
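    // Clamp the accumulators to the [min, max] range from the microkernel parameters.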
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
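    // Full-tile path: store all 8 output columns for each of the 6 rows and rewind
    // the A pointers for the next column block; the else branch handles nc < 8.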
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,170 | 40.031153 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
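    // Main loop: load 2 A elements per row (64-bit loads) and process 2 K columns per iteration.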
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,387 | 37.474074 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neon-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neon_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
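    // Main loop: 128-bit A loads (4 elements per row); vmlaq_lane_f32 addresses each
    // A lane directly instead of duplicating it into a separate register first.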
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,463 | 40.96633 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
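    // Main loop: 64-bit A loads with lane-addressed multiply-accumulate, 2 K columns per iteration.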
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,860 | 37.22093 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neonfma-dup-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
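    // Main loop: same dup/ld128 structure as the plain NEON variant, but accumulates
    // with fused multiply-add (vfmaq_f32).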
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
const float32x4_t va4c1 = vdupq_lane_f32(vget_low_f32(va4), 1);
const float32x4_t va5c1 = vdupq_lane_f32(vget_low_f32(va5), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
const float32x4_t va5c2 = vdupq_lane_f32(vget_high_f32(va5), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c2, vb0123c2);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
const float32x4_t va4c3 = vdupq_lane_f32(vget_high_f32(va4), 1);
const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c3, vb0123c3);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c3, vb4567c3);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,173 | 40.040498 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc0x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc1x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc2x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc3x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc4x4567 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x0123 = vld1q_f32(acc); acc += 4;
float32x4_t vacc5x4567 = vld1q_f32(acc); acc += 4;
size_t k = kc;
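    // Main loop: 64-bit A loads, duplicated lanes, fused multiply-add accumulation.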
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,390 | 37.485185 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
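    // Accumulate over K one float at a time: broadcast a single activation from each of the
    // 6 rows and multiply-accumulate it against 8 packed weights with relaxed madd.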
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
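    // Clamp all 6x8 accumulators to the [min, max] output range.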
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
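    // Store the 6x8 output tile: full 8-column stores on the fast path, element-wise tail otherwise.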
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,800 | 36.135021 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
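    // Main loop: consume K in blocks of 4 floats, splatting each of the 4 activation lanes in turn.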
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c0, vb4567c0, vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c1, vb4567c1, vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c2, vb4567c2, vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5c3, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
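    // Remainder loop: handle the last 1-3 K elements one float at a time.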
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567, vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,414 | 43.811047 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmrelaxedsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
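    // Accumulate over K one float at a time using separate multiply and add
    // (no relaxed fused multiply-add in this variant).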
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
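    // Write out the 6x8 tile: whole rows when nc >= 8, otherwise a 4/2/1-element tail per row.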
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,760 | 35.966245 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmrelaxedsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
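    // Main loop: process 4 K elements per iteration, splatting each activation lane before the MACs.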
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
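    // Tail loop for the last 1-3 K elements, one broadcast activation per step.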
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,230 | 43.276163 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmsimd-arm-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
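    // Accumulate over K one broadcast activation at a time with separate multiply and add.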
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
k -= sizeof(float);
} while (k != 0);
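    // Clamp the accumulators to [min, max] with wasm_f32x4_min/max (the "arm" flavor of the clamp).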
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,325 | 34.130802 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmsimd-arm-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
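    // Main loop: consume K in blocks of 4 floats, splatting lanes 0-3 of each activation vector.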
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
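    // Remainder: process the last 1-3 K elements one scalar at a time, splatting each A value across a vector.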
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
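    // Clamp all accumulators to the [min, max] output range.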
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
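    // Store the full 6x8 output tile when at least 8 columns remain; tails of 4, 2 and 1 columns are handled in the else branch.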
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,795 | 42.011628 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmsimd-x86-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
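    // Loadsplat inner loop: one K element per iteration, broadcast per row and multiplied against an 8-wide slab of B.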
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123));
vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567));
k -= sizeof(float);
} while (k != 0);
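    // Clamp to the [min, max] range using the pmin/pmax variants (x86-oriented codegen).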
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,349 | 34.232068 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8-minmax-wasmsimd-x86-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
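    // Main loop: consume 4 K elements per iteration, broadcasting each lane of the A vectors against a fresh 8-wide slab of B.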
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c0, vb4567c0), vacc5x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t va5c1 = wasm_v32x4_shuffle(va5, va5, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c1, vb4567c1), vacc5x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t va5c2 = wasm_v32x4_shuffle(va5, va5, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c2, vb4567c2), vacc5x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5c3, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
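    // Remainder: the last 1-3 K elements are handled one scalar at a time with splatted loads.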
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t va5 = wasm_v128_load32_splat(a5);
a5 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567), vacc5x4567);
k -= sizeof(float);
} while (k != 0);
}
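    // Clamp accumulators to the output range before storing.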
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,819 | 42.081395 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
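    // Main loop: 4 K elements per iteration; the A vectors are rotated one lane (s4 scheme) between the four FMA sub-steps.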
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(va5, vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(va5, vb4567c3, vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
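    // Remainder (1-3 K elements): A lanes paired with zero B values (including the zero padding past K) are masked off before the relaxed FMA.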
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc5x4567);
w += 32;
}
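    // Clamp accumulators to [min, max] using the relaxed SIMD min/max.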
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
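    // Store the full 6x8 tile when at least 8 columns remain; otherwise write 4/2/1-column tails.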
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,303 | 50.960976 | 130 | c |
| XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8s4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
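    // Main loop: 4 K elements per iteration using the shifted (s4) A-vector scheme with plain multiply-add.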
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
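    // Remainder (1-3 K elements): mask out A lanes whose B values are zero (zero padding past K) before accumulating.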
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
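    // Apply the output bounds: relaxed max against vmin gives the lower clamp, relaxed min against vmax the upper clamp.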
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);
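    // Store the 6x8 output tile: full 8-column rows when nc >= 8, otherwise fall through to the 4/2/1-column tail below.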
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,011 | 50.24878 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8s4-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
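    // Main K loop: consume 4 input channels per iteration, rotating the activation lanes (s4 shuffle) between the 4 sub-steps.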
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
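      // Remainder of 1-3 K elements: lanes whose packed weight is zero are masked out of the activations (andnot),
      // so the zero-padded tail of the weights contributes nothing to the accumulators.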
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
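    // Clamp the accumulators to [min, max] (wasm_f32x4_max/min variant used for ARM targets).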
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_max(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_max(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_min(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_min(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,576 | 49.187805 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-6x8s4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(acc + 0);
v128_t vacc0x4567 = wasm_v128_load(acc + 4);
v128_t vacc1x0123 = wasm_v128_load(acc + 8);
v128_t vacc1x4567 = wasm_v128_load(acc + 12);
v128_t vacc2x0123 = wasm_v128_load(acc + 16);
v128_t vacc2x4567 = wasm_v128_load(acc + 20);
v128_t vacc3x0123 = wasm_v128_load(acc + 24);
v128_t vacc3x4567 = wasm_v128_load(acc + 28);
v128_t vacc4x0123 = wasm_v128_load(acc + 32);
v128_t vacc4x4567 = wasm_v128_load(acc + 36);
v128_t vacc5x0123 = wasm_v128_load(acc + 40);
v128_t vacc5x4567 = wasm_v128_load(acc + 44);
acc += 48;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
v128_t va4 = wasm_v128_load(a4);
a4 += 4;
v128_t va5 = wasm_v128_load(a5);
a5 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(va5, vb4567c3), vacc5x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
v128_t va4 = wasm_v128_load(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
v128_t va5 = wasm_v128_load(a5);
a5 = (const float*) ((uintptr_t) a5 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
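      // vzero is compared against the weights below; lanes where a weight equals zero are stripped from the activations before the multiply-add.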
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc5x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc4x0123);
vacc5x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc5x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc4x4567);
vacc5x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc5x4567);
w += 32;
}
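    // Clamp the accumulators to [min, max] (wasm_f32x4_pmax/pmin variant used for x86 targets, where pseudo-min/max map to minps/maxps).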
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmax(vmin, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmax(vmin, vacc5x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc5x0123 = wasm_f32x4_pmin(vmax, vacc5x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
vacc5x4567 = wasm_f32x4_pmin(vmax, vacc5x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c5, vacc5x0123);
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c5, vacc5x0123, 0);
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc5x0123 = wasm_v64x2_shuffle(vacc5x0123, vacc5x0123, 1, 1);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c5, vacc5x0123, 0);
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,600 | 49.246341 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-7x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
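    // GEMMINC: the 7x16 accumulator tile is initialized from previously computed partial sums in acc.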
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
__m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
__m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
__m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
__m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
__m512 vacc5x0123456789ABCDEF = _mm512_load_ps(acc + 80);
__m512 vacc6x0123456789ABCDEF = _mm512_load_ps(acc + 96);
acc += 112;
size_t k = kc;
do {
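      // Each K step broadcasts one scalar from every row of A and FMAs it against a 16-wide block of packed B.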
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 6,783 | 36.274725 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-7x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
__m256 vacc6x01234567 = _mm256_load_ps(acc + 48);
acc += 56;
size_t k = kc;
do {
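      // Broadcast one A element per row and accumulate against an 8-wide block of B with separate multiply and add (no FMA in plain AVX).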
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
vacc6x01234567 = _mm256_add_ps(vacc6x01234567, _mm256_mul_ps(va6, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
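      // Tail of 1-7 columns: work on 128-bit halves of the accumulators and store 4, 2, and 1 elements as needed.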
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,188 | 33.846809 | 85 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-7x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
__m256 vacc6x01234567 = _mm256_load_ps(acc + 48);
acc += 56;
size_t k = kc;
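    // Main loop over K: broadcast one element from each of the 7 rows of A and
    // fused-multiply-accumulate it against the shared 8-wide block of packed weights.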
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
k -= sizeof(float);
} while (k != 0);
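    // Apply the output clamp: max against vmin, then min against vmax.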
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
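    // Full 8-column tile: store rows c6 down to c0, step each C pointer by cn_stride,
    // and rewind the A pointers by kc for the next column block.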
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,098 | 33.46383 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-8x16-minmax-avx512f-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
__m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
__m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
__m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
__m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
__m512 vacc5x0123456789ABCDEF = _mm512_load_ps(acc + 80);
__m512 vacc6x0123456789ABCDEF = _mm512_load_ps(acc + 96);
__m512 vacc7x0123456789ABCDEF = _mm512_load_ps(acc + 112);
acc += 128;
size_t k = kc;
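    // Main loop over K: load one 16-wide vector of packed weights, broadcast a single
    // scalar from each of the 8 rows of A, and accumulate with 512-bit FMAs.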
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
const __m512 va7 = _mm512_set1_ps(*a7);
vacc7x0123456789ABCDEF = _mm512_fmadd_ps(va7, vb0123456789ABCDEF, vacc7x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
a7 += 1;
k -= sizeof(float);
} while (k != 0);
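    // Clamp the 16-wide accumulators to the scalar min/max from params.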
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_max_ps(vmin, vacc7x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_min_ps(vmax, vacc7x0123456789ABCDEF);
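    // Full 16-column tile: store rows c7 down to c0 and rewind the A pointers by kc.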
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a7 = (const float*) ((uintptr_t) a7 - kc);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 7,550 | 37.136364 | 106 | c |
XNNPACK | XNNPACK-master/src/f32-gemminc/gen/f32-gemminc-8x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const float* restrict acc,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
__m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
__m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
__m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
__m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
__m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
__m256 vacc6x01234567 = _mm256_load_ps(acc + 48);
__m256 vacc7x01234567 = _mm256_load_ps(acc + 56);
acc += 64;
size_t k = kc;
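    // Inner K loop: one broadcast per row of A (8 rows) and one FMA per row against
    // the shared 8-wide weight vector.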
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256 va7 = _mm256_broadcast_ss(a7);
a7 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);
k -= sizeof(float);
} while (k != 0);
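    // Clamp all 8 row accumulators to the requested output range.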
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
vacc7x01234567 = _mm256_max_ps(vmin, vacc7x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
vacc7x01234567 = _mm256_min_ps(vmax, vacc7x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c7, vacc7x01234567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a7 = (const float*) ((uintptr_t) a7 - kc);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c7, vacc7x0123);
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c7 += 4;
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c7, vacc7x0123);
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c7 += 2;
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c7, vacc7x0123);
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,078 | 34.189922 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neon-p16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neon_p16(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
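    // Main loop: 16 output pixels per iteration. Each pixel reads its top (left, right)
    // and bottom (left, right) input pair plus an interleaved (alpha_h, alpha_v) weight pair.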
for (; p >= 16; p -= 16) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
const float* itl8 = (const float*) ((uintptr_t) i[16] + input_offset);
const float* ibl8 = (const float*) ((uintptr_t) i[17] + input_offset);
const float* itl9 = (const float*) ((uintptr_t) i[18] + input_offset);
const float* ibl9 = (const float*) ((uintptr_t) i[19] + input_offset);
const float* itlA = (const float*) ((uintptr_t) i[20] + input_offset);
const float* iblA = (const float*) ((uintptr_t) i[21] + input_offset);
const float* itlB = (const float*) ((uintptr_t) i[22] + input_offset);
const float* iblB = (const float*) ((uintptr_t) i[23] + input_offset);
const float* itlC = (const float*) ((uintptr_t) i[24] + input_offset);
const float* iblC = (const float*) ((uintptr_t) i[25] + input_offset);
const float* itlD = (const float*) ((uintptr_t) i[26] + input_offset);
const float* iblD = (const float*) ((uintptr_t) i[27] + input_offset);
const float* itlE = (const float*) ((uintptr_t) i[28] + input_offset);
const float* iblE = (const float*) ((uintptr_t) i[29] + input_offset);
const float* itlF = (const float*) ((uintptr_t) i[30] + input_offset);
const float* iblF = (const float*) ((uintptr_t) i[31] + input_offset);
i += 2 * 16;
const float32x4x2_t vw0123 = vld2q_f32(w + 0);
const float32x4x2_t vw4567 = vld2q_f32(w + 8);
const float32x4x2_t vw89AB = vld2q_f32(w + 16);
const float32x4x2_t vwCDEF = vld2q_f32(w + 24);
w += 2 * 16;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x2_t vtltr4 = vld1_f32(itl4);
const float32x2_t vblbr4 = vld1_f32(ibl4);
const float32x2_t vtltr5 = vld1_f32(itl5);
const float32x2_t vblbr5 = vld1_f32(ibl5);
const float32x2_t vtltr6 = vld1_f32(itl6);
const float32x2_t vblbr6 = vld1_f32(ibl6);
const float32x2_t vtltr7 = vld1_f32(itl7);
const float32x2_t vblbr7 = vld1_f32(ibl7);
const float32x2_t vtltr8 = vld1_f32(itl8);
const float32x2_t vblbr8 = vld1_f32(ibl8);
const float32x2_t vtltr9 = vld1_f32(itl9);
const float32x2_t vblbr9 = vld1_f32(ibl9);
const float32x2_t vtltrA = vld1_f32(itlA);
const float32x2_t vblbrA = vld1_f32(iblA);
const float32x2_t vtltrB = vld1_f32(itlB);
const float32x2_t vblbrB = vld1_f32(iblB);
const float32x2_t vtltrC = vld1_f32(itlC);
const float32x2_t vblbrC = vld1_f32(iblC);
const float32x2_t vtltrD = vld1_f32(itlD);
const float32x2_t vblbrD = vld1_f32(iblD);
const float32x2_t vtltrE = vld1_f32(itlE);
const float32x2_t vblbrE = vld1_f32(iblE);
const float32x2_t vtltrF = vld1_f32(itlF);
const float32x2_t vblbrF = vld1_f32(iblF);
const float32x4_t valphah0123 = vw0123.val[0];
const float32x4_t valphav0123 = vw0123.val[1];
const float32x4_t valphah4567 = vw4567.val[0];
const float32x4_t valphav4567 = vw4567.val[1];
const float32x4_t valphah89AB = vw89AB.val[0];
const float32x4_t valphav89AB = vw89AB.val[1];
const float32x4_t valphahCDEF = vwCDEF.val[0];
const float32x4_t valphavCDEF = vwCDEF.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vtltr45 = vcombine_f32(vtltr4, vtltr5);
const float32x4_t vblbr45 = vcombine_f32(vblbr4, vblbr5);
const float32x4_t vtltr67 = vcombine_f32(vtltr6, vtltr7);
const float32x4_t vblbr67 = vcombine_f32(vblbr6, vblbr7);
const float32x4_t vtltr89 = vcombine_f32(vtltr8, vtltr9);
const float32x4_t vblbr89 = vcombine_f32(vblbr8, vblbr9);
const float32x4_t vtltrAB = vcombine_f32(vtltrA, vtltrB);
const float32x4_t vblbrAB = vcombine_f32(vblbrA, vblbrB);
const float32x4_t vtltrCD = vcombine_f32(vtltrC, vtltrD);
const float32x4_t vblbrCD = vcombine_f32(vblbrC, vblbrD);
const float32x4_t vtltrEF = vcombine_f32(vtltrE, vtltrF);
const float32x4_t vblbrEF = vcombine_f32(vblbrE, vblbrF);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4_t vldrd45 = vsubq_f32(vblbr45, vtltr45);
const float32x4_t vldrd67 = vsubq_f32(vblbr67, vtltr67);
const float32x4_t vldrd89 = vsubq_f32(vblbr89, vtltr89);
const float32x4_t vldrdAB = vsubq_f32(vblbrAB, vtltrAB);
const float32x4_t vldrdCD = vsubq_f32(vblbrCD, vtltrCD);
const float32x4_t vldrdEF = vsubq_f32(vblbrEF, vtltrEF);
const float32x4x2_t vld_t0123 = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld0123 = vld_t0123.val[0];
const float32x4_t vrd0123 = vld_t0123.val[1];
const float32x4x2_t vld_t4567 = vuzpq_f32(vldrd45, vldrd67);
const float32x4_t vld4567 = vld_t4567.val[0];
const float32x4_t vrd4567 = vld_t4567.val[1];
const float32x4x2_t vld_t89AB = vuzpq_f32(vldrd89, vldrdAB);
const float32x4_t vld89AB = vld_t89AB.val[0];
const float32x4_t vrd89AB = vld_t89AB.val[1];
const float32x4x2_t vld_tCDEF = vuzpq_f32(vldrdCD, vldrdEF);
const float32x4_t vldCDEF = vld_tCDEF.val[0];
const float32x4_t vrdCDEF = vld_tCDEF.val[1];
const float32x4x2_t vtl_t0123 = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl0123 = vtl_t0123.val[0];
const float32x4_t vtr0123 = vtl_t0123.val[1];
const float32x4x2_t vtl_t4567 = vuzpq_f32(vtltr45, vtltr67);
const float32x4_t vtl4567 = vtl_t4567.val[0];
const float32x4_t vtr4567 = vtl_t4567.val[1];
const float32x4x2_t vtl_t89AB = vuzpq_f32(vtltr89, vtltrAB);
const float32x4_t vtl89AB = vtl_t89AB.val[0];
const float32x4_t vtr89AB = vtl_t89AB.val[1];
const float32x4x2_t vtl_tCDEF = vuzpq_f32(vtltrCD, vtltrEF);
const float32x4_t vtlCDEF = vtl_tCDEF.val[0];
const float32x4_t vtrCDEF = vtl_tCDEF.val[1];
const float32x4_t vl0123 = vmlaq_f32(vtl0123, vld0123, valphav0123);
const float32x4_t vr0123 = vmlaq_f32(vtr0123, vrd0123, valphav0123);
const float32x4_t vl4567 = vmlaq_f32(vtl4567, vld4567, valphav4567);
const float32x4_t vr4567 = vmlaq_f32(vtr4567, vrd4567, valphav4567);
const float32x4_t vl89AB = vmlaq_f32(vtl89AB, vld89AB, valphav89AB);
const float32x4_t vr89AB = vmlaq_f32(vtr89AB, vrd89AB, valphav89AB);
const float32x4_t vlCDEF = vmlaq_f32(vtlCDEF, vldCDEF, valphavCDEF);
const float32x4_t vrCDEF = vmlaq_f32(vtrCDEF, vrdCDEF, valphavCDEF);
const float32x4_t vd0123 = vsubq_f32(vr0123, vl0123);
const float32x4_t vd4567 = vsubq_f32(vr4567, vl4567);
const float32x4_t vd89AB = vsubq_f32(vr89AB, vl89AB);
const float32x4_t vdCDEF = vsubq_f32(vrCDEF, vlCDEF);
const float32x4_t vo0123 = vmlaq_f32(vl0123, vd0123, valphah0123);
const float32x4_t vo4567 = vmlaq_f32(vl4567, vd4567, valphah4567);
const float32x4_t vo89AB = vmlaq_f32(vl89AB, vd89AB, valphah89AB);
const float32x4_t voCDEF = vmlaq_f32(vlCDEF, vdCDEF, valphahCDEF);
vst1q_f32(output + 0, vo0123);
vst1q_f32(output + 4, vo4567);
vst1q_f32(output + 8, vo89AB);
vst1q_f32(output + 12, voCDEF);
output += 16;
}
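    // Remainder: fall back to 4 pixels per iteration, then 2- and 1-pixel tails below.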
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
const float32x4_t vl = vmlaq_f32(vtl, vld, valphav);
const float32x4_t vr = vmlaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vmlaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vmla_f32(vtl, vld, valphav);
const float32x2_t vr = vmla_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vmla_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
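        // For example, with top_left = 0, top_right = 1, bottom_left = 2, bottom_right = 3
        // and alpha_h = alpha_v = 0.5: left = 1, right = 2, result = 1.5.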
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vmla_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 14,883 | 44.378049 | 77 | c |
XNNPACK | XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neon-p4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neon_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
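    // 4 output pixels per main-loop iteration; 2- and 1-pixel tails are handled below.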
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
const float32x4_t vl = vmlaq_f32(vtl, vld, valphav);
const float32x4_t vr = vmlaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vmlaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vmla_f32(vtl, vld, valphav);
const float32x2_t vr = vmla_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vmla_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vmla_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 5,939 | 34.357143 | 77 | c |
XNNPACK | XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neon-p8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neon_p8(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
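    // 8 output pixels per main-loop iteration, then 4-, 2-, and 1-pixel remainder paths.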
for (; p >= 8; p -= 8) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
i += 2 * 8;
const float32x4x2_t vw0123 = vld2q_f32(w + 0);
const float32x4x2_t vw4567 = vld2q_f32(w + 8);
w += 2 * 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x2_t vtltr4 = vld1_f32(itl4);
const float32x2_t vblbr4 = vld1_f32(ibl4);
const float32x2_t vtltr5 = vld1_f32(itl5);
const float32x2_t vblbr5 = vld1_f32(ibl5);
const float32x2_t vtltr6 = vld1_f32(itl6);
const float32x2_t vblbr6 = vld1_f32(ibl6);
const float32x2_t vtltr7 = vld1_f32(itl7);
const float32x2_t vblbr7 = vld1_f32(ibl7);
const float32x4_t valphah0123 = vw0123.val[0];
const float32x4_t valphav0123 = vw0123.val[1];
const float32x4_t valphah4567 = vw4567.val[0];
const float32x4_t valphav4567 = vw4567.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vtltr45 = vcombine_f32(vtltr4, vtltr5);
const float32x4_t vblbr45 = vcombine_f32(vblbr4, vblbr5);
const float32x4_t vtltr67 = vcombine_f32(vtltr6, vtltr7);
const float32x4_t vblbr67 = vcombine_f32(vblbr6, vblbr7);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4_t vldrd45 = vsubq_f32(vblbr45, vtltr45);
const float32x4_t vldrd67 = vsubq_f32(vblbr67, vtltr67);
const float32x4x2_t vld_t0123 = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld0123 = vld_t0123.val[0];
const float32x4_t vrd0123 = vld_t0123.val[1];
const float32x4x2_t vld_t4567 = vuzpq_f32(vldrd45, vldrd67);
const float32x4_t vld4567 = vld_t4567.val[0];
const float32x4_t vrd4567 = vld_t4567.val[1];
const float32x4x2_t vtl_t0123 = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl0123 = vtl_t0123.val[0];
const float32x4_t vtr0123 = vtl_t0123.val[1];
const float32x4x2_t vtl_t4567 = vuzpq_f32(vtltr45, vtltr67);
const float32x4_t vtl4567 = vtl_t4567.val[0];
const float32x4_t vtr4567 = vtl_t4567.val[1];
const float32x4_t vl0123 = vmlaq_f32(vtl0123, vld0123, valphav0123);
const float32x4_t vr0123 = vmlaq_f32(vtr0123, vrd0123, valphav0123);
const float32x4_t vl4567 = vmlaq_f32(vtl4567, vld4567, valphav4567);
const float32x4_t vr4567 = vmlaq_f32(vtr4567, vrd4567, valphav4567);
const float32x4_t vd0123 = vsubq_f32(vr0123, vl0123);
const float32x4_t vd4567 = vsubq_f32(vr4567, vl4567);
const float32x4_t vo0123 = vmlaq_f32(vl0123, vd0123, valphah0123);
const float32x4_t vo4567 = vmlaq_f32(vl4567, vd4567, valphah4567);
vst1q_f32(output + 0, vo0123);
vst1q_f32(output + 4, vo4567);
output += 8;
}
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
const float32x4_t vl = vmlaq_f32(vtl, vld, valphav);
const float32x4_t vr = vmlaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vmlaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vmla_f32(vtl, vld, valphav);
const float32x2_t vr = vmla_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vmla_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vmla_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 10,452 | 39.832031 | 77 | c |
XNNPACK | XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neonfma-p16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neonfma_p16(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
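    // Same structure as the plain NEON P16 kernel above, but using fused multiply-add
    // (vfmaq_f32) in place of vmlaq_f32.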
for (; p >= 16; p -= 16) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
const float* itl8 = (const float*) ((uintptr_t) i[16] + input_offset);
const float* ibl8 = (const float*) ((uintptr_t) i[17] + input_offset);
const float* itl9 = (const float*) ((uintptr_t) i[18] + input_offset);
const float* ibl9 = (const float*) ((uintptr_t) i[19] + input_offset);
const float* itlA = (const float*) ((uintptr_t) i[20] + input_offset);
const float* iblA = (const float*) ((uintptr_t) i[21] + input_offset);
const float* itlB = (const float*) ((uintptr_t) i[22] + input_offset);
const float* iblB = (const float*) ((uintptr_t) i[23] + input_offset);
const float* itlC = (const float*) ((uintptr_t) i[24] + input_offset);
const float* iblC = (const float*) ((uintptr_t) i[25] + input_offset);
const float* itlD = (const float*) ((uintptr_t) i[26] + input_offset);
const float* iblD = (const float*) ((uintptr_t) i[27] + input_offset);
const float* itlE = (const float*) ((uintptr_t) i[28] + input_offset);
const float* iblE = (const float*) ((uintptr_t) i[29] + input_offset);
const float* itlF = (const float*) ((uintptr_t) i[30] + input_offset);
const float* iblF = (const float*) ((uintptr_t) i[31] + input_offset);
i += 2 * 16;
const float32x4x2_t vw0123 = vld2q_f32(w + 0);
const float32x4x2_t vw4567 = vld2q_f32(w + 8);
const float32x4x2_t vw89AB = vld2q_f32(w + 16);
const float32x4x2_t vwCDEF = vld2q_f32(w + 24);
w += 2 * 16;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x2_t vtltr4 = vld1_f32(itl4);
const float32x2_t vblbr4 = vld1_f32(ibl4);
const float32x2_t vtltr5 = vld1_f32(itl5);
const float32x2_t vblbr5 = vld1_f32(ibl5);
const float32x2_t vtltr6 = vld1_f32(itl6);
const float32x2_t vblbr6 = vld1_f32(ibl6);
const float32x2_t vtltr7 = vld1_f32(itl7);
const float32x2_t vblbr7 = vld1_f32(ibl7);
const float32x2_t vtltr8 = vld1_f32(itl8);
const float32x2_t vblbr8 = vld1_f32(ibl8);
const float32x2_t vtltr9 = vld1_f32(itl9);
const float32x2_t vblbr9 = vld1_f32(ibl9);
const float32x2_t vtltrA = vld1_f32(itlA);
const float32x2_t vblbrA = vld1_f32(iblA);
const float32x2_t vtltrB = vld1_f32(itlB);
const float32x2_t vblbrB = vld1_f32(iblB);
const float32x2_t vtltrC = vld1_f32(itlC);
const float32x2_t vblbrC = vld1_f32(iblC);
const float32x2_t vtltrD = vld1_f32(itlD);
const float32x2_t vblbrD = vld1_f32(iblD);
const float32x2_t vtltrE = vld1_f32(itlE);
const float32x2_t vblbrE = vld1_f32(iblE);
const float32x2_t vtltrF = vld1_f32(itlF);
const float32x2_t vblbrF = vld1_f32(iblF);
const float32x4_t valphah0123 = vw0123.val[0];
const float32x4_t valphav0123 = vw0123.val[1];
const float32x4_t valphah4567 = vw4567.val[0];
const float32x4_t valphav4567 = vw4567.val[1];
const float32x4_t valphah89AB = vw89AB.val[0];
const float32x4_t valphav89AB = vw89AB.val[1];
const float32x4_t valphahCDEF = vwCDEF.val[0];
const float32x4_t valphavCDEF = vwCDEF.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vtltr45 = vcombine_f32(vtltr4, vtltr5);
const float32x4_t vblbr45 = vcombine_f32(vblbr4, vblbr5);
const float32x4_t vtltr67 = vcombine_f32(vtltr6, vtltr7);
const float32x4_t vblbr67 = vcombine_f32(vblbr6, vblbr7);
const float32x4_t vtltr89 = vcombine_f32(vtltr8, vtltr9);
const float32x4_t vblbr89 = vcombine_f32(vblbr8, vblbr9);
const float32x4_t vtltrAB = vcombine_f32(vtltrA, vtltrB);
const float32x4_t vblbrAB = vcombine_f32(vblbrA, vblbrB);
const float32x4_t vtltrCD = vcombine_f32(vtltrC, vtltrD);
const float32x4_t vblbrCD = vcombine_f32(vblbrC, vblbrD);
const float32x4_t vtltrEF = vcombine_f32(vtltrE, vtltrF);
const float32x4_t vblbrEF = vcombine_f32(vblbrE, vblbrF);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4_t vldrd45 = vsubq_f32(vblbr45, vtltr45);
const float32x4_t vldrd67 = vsubq_f32(vblbr67, vtltr67);
const float32x4_t vldrd89 = vsubq_f32(vblbr89, vtltr89);
const float32x4_t vldrdAB = vsubq_f32(vblbrAB, vtltrAB);
const float32x4_t vldrdCD = vsubq_f32(vblbrCD, vtltrCD);
const float32x4_t vldrdEF = vsubq_f32(vblbrEF, vtltrEF);
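      // vuzpq_f32 de-interleaves the packed per-pixel pairs: val[0] gathers the
      // left-column lanes and val[1] the right-column lanes for four pixels at a time.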
const float32x4x2_t vld_t0123 = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld0123 = vld_t0123.val[0];
const float32x4_t vrd0123 = vld_t0123.val[1];
const float32x4x2_t vld_t4567 = vuzpq_f32(vldrd45, vldrd67);
const float32x4_t vld4567 = vld_t4567.val[0];
const float32x4_t vrd4567 = vld_t4567.val[1];
const float32x4x2_t vld_t89AB = vuzpq_f32(vldrd89, vldrdAB);
const float32x4_t vld89AB = vld_t89AB.val[0];
const float32x4_t vrd89AB = vld_t89AB.val[1];
const float32x4x2_t vld_tCDEF = vuzpq_f32(vldrdCD, vldrdEF);
const float32x4_t vldCDEF = vld_tCDEF.val[0];
const float32x4_t vrdCDEF = vld_tCDEF.val[1];
const float32x4x2_t vtl_t0123 = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl0123 = vtl_t0123.val[0];
const float32x4_t vtr0123 = vtl_t0123.val[1];
const float32x4x2_t vtl_t4567 = vuzpq_f32(vtltr45, vtltr67);
const float32x4_t vtl4567 = vtl_t4567.val[0];
const float32x4_t vtr4567 = vtl_t4567.val[1];
const float32x4x2_t vtl_t89AB = vuzpq_f32(vtltr89, vtltrAB);
const float32x4_t vtl89AB = vtl_t89AB.val[0];
const float32x4_t vtr89AB = vtl_t89AB.val[1];
const float32x4x2_t vtl_tCDEF = vuzpq_f32(vtltrCD, vtltrEF);
const float32x4_t vtlCDEF = vtl_tCDEF.val[0];
const float32x4_t vtrCDEF = vtl_tCDEF.val[1];
const float32x4_t vl0123 = vfmaq_f32(vtl0123, vld0123, valphav0123);
const float32x4_t vr0123 = vfmaq_f32(vtr0123, vrd0123, valphav0123);
const float32x4_t vl4567 = vfmaq_f32(vtl4567, vld4567, valphav4567);
const float32x4_t vr4567 = vfmaq_f32(vtr4567, vrd4567, valphav4567);
const float32x4_t vl89AB = vfmaq_f32(vtl89AB, vld89AB, valphav89AB);
const float32x4_t vr89AB = vfmaq_f32(vtr89AB, vrd89AB, valphav89AB);
const float32x4_t vlCDEF = vfmaq_f32(vtlCDEF, vldCDEF, valphavCDEF);
const float32x4_t vrCDEF = vfmaq_f32(vtrCDEF, vrdCDEF, valphavCDEF);
const float32x4_t vd0123 = vsubq_f32(vr0123, vl0123);
const float32x4_t vd4567 = vsubq_f32(vr4567, vl4567);
const float32x4_t vd89AB = vsubq_f32(vr89AB, vl89AB);
const float32x4_t vdCDEF = vsubq_f32(vrCDEF, vlCDEF);
const float32x4_t vo0123 = vfmaq_f32(vl0123, vd0123, valphah0123);
const float32x4_t vo4567 = vfmaq_f32(vl4567, vd4567, valphah4567);
const float32x4_t vo89AB = vfmaq_f32(vl89AB, vd89AB, valphah89AB);
const float32x4_t voCDEF = vfmaq_f32(vlCDEF, vdCDEF, valphahCDEF);
vst1q_f32(output + 0, vo0123);
vst1q_f32(output + 4, vo4567);
vst1q_f32(output + 8, vo89AB);
vst1q_f32(output + 12, voCDEF);
output += 16;
}
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
const float32x4_t vl = vfmaq_f32(vtl, vld, valphav);
const float32x4_t vr = vfmaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vfmaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vfma_f32(vtl, vld, valphav);
const float32x2_t vr = vfma_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vfma_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
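        // Illustrative check (values chosen here for exposition, not taken from the kernel):
        // with top_left = 1, top_right = 3, bottom_left = 5, bottom_right = 7,
        // alpha_v = 0.5 and alpha_h = 0.25:
        //   left = 1 + 0.5 * (5 - 1) = 3, right = 3 + 0.5 * (7 - 3) = 5,
        //   result = 3 + 0.25 * (5 - 3) = 3.5, matching the expanded formula above.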
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vfma_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 14,886 | 44.387195 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neonfma-p4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neonfma_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
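      // Interpolate vertically along the left and right pixel columns first,
      // then blend the two columns horizontally to produce the output.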
const float32x4_t vl = vfmaq_f32(vtl, vld, valphav);
const float32x4_t vr = vfmaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vfmaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vfma_f32(vtl, vld, valphav);
const float32x2_t vr = vfma_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vfma_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vfma_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 5,942 | 34.375 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-neonfma-p8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__neonfma_p8(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 8; p -= 8) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
i += 2 * 8;
const float32x4x2_t vw0123 = vld2q_f32(w + 0);
const float32x4x2_t vw4567 = vld2q_f32(w + 8);
w += 2 * 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x2_t vtltr4 = vld1_f32(itl4);
const float32x2_t vblbr4 = vld1_f32(ibl4);
const float32x2_t vtltr5 = vld1_f32(itl5);
const float32x2_t vblbr5 = vld1_f32(ibl5);
const float32x2_t vtltr6 = vld1_f32(itl6);
const float32x2_t vblbr6 = vld1_f32(ibl6);
const float32x2_t vtltr7 = vld1_f32(itl7);
const float32x2_t vblbr7 = vld1_f32(ibl7);
const float32x4_t valphah0123 = vw0123.val[0];
const float32x4_t valphav0123 = vw0123.val[1];
const float32x4_t valphah4567 = vw4567.val[0];
const float32x4_t valphav4567 = vw4567.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vtltr45 = vcombine_f32(vtltr4, vtltr5);
const float32x4_t vblbr45 = vcombine_f32(vblbr4, vblbr5);
const float32x4_t vtltr67 = vcombine_f32(vtltr6, vtltr7);
const float32x4_t vblbr67 = vcombine_f32(vblbr6, vblbr7);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4_t vldrd45 = vsubq_f32(vblbr45, vtltr45);
const float32x4_t vldrd67 = vsubq_f32(vblbr67, vtltr67);
const float32x4x2_t vld_t0123 = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld0123 = vld_t0123.val[0];
const float32x4_t vrd0123 = vld_t0123.val[1];
const float32x4x2_t vld_t4567 = vuzpq_f32(vldrd45, vldrd67);
const float32x4_t vld4567 = vld_t4567.val[0];
const float32x4_t vrd4567 = vld_t4567.val[1];
const float32x4x2_t vtl_t0123 = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl0123 = vtl_t0123.val[0];
const float32x4_t vtr0123 = vtl_t0123.val[1];
const float32x4x2_t vtl_t4567 = vuzpq_f32(vtltr45, vtltr67);
const float32x4_t vtl4567 = vtl_t4567.val[0];
const float32x4_t vtr4567 = vtl_t4567.val[1];
const float32x4_t vl0123 = vfmaq_f32(vtl0123, vld0123, valphav0123);
const float32x4_t vr0123 = vfmaq_f32(vtr0123, vrd0123, valphav0123);
const float32x4_t vl4567 = vfmaq_f32(vtl4567, vld4567, valphav4567);
const float32x4_t vr4567 = vfmaq_f32(vtr4567, vrd4567, valphav4567);
const float32x4_t vd0123 = vsubq_f32(vr0123, vl0123);
const float32x4_t vd4567 = vsubq_f32(vr4567, vl4567);
const float32x4_t vo0123 = vfmaq_f32(vl0123, vd0123, valphah0123);
const float32x4_t vo4567 = vfmaq_f32(vl4567, vd4567, valphah4567);
vst1q_f32(output + 0, vo0123);
vst1q_f32(output + 4, vo4567);
output += 8;
}
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const float32x4x2_t vw = vld2q_f32(w);
w += 8;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vtltr2 = vld1_f32(itl2);
const float32x2_t vblbr2 = vld1_f32(ibl2);
const float32x2_t vtltr3 = vld1_f32(itl3);
const float32x2_t vblbr3 = vld1_f32(ibl3);
const float32x4_t valphah = vw.val[0];
const float32x4_t valphav = vw.val[1];
const float32x4_t vtltr01 = vcombine_f32(vtltr0, vtltr1);
const float32x4_t vblbr01 = vcombine_f32(vblbr0, vblbr1);
const float32x4_t vtltr23 = vcombine_f32(vtltr2, vtltr3);
const float32x4_t vblbr23 = vcombine_f32(vblbr2, vblbr3);
const float32x4_t vldrd01 = vsubq_f32(vblbr01, vtltr01);
const float32x4_t vldrd23 = vsubq_f32(vblbr23, vtltr23);
const float32x4x2_t vld_t = vuzpq_f32(vldrd01, vldrd23);
const float32x4_t vld = vld_t.val[0];
const float32x4_t vrd = vld_t.val[1];
const float32x4x2_t vtl_t = vuzpq_f32(vtltr01, vtltr23);
const float32x4_t vtl = vtl_t.val[0];
const float32x4_t vtr = vtl_t.val[1];
const float32x4_t vl = vfmaq_f32(vtl, vld, valphav);
const float32x4_t vr = vfmaq_f32(vtr, vrd, valphav);
const float32x4_t vd = vsubq_f32(vr, vl);
const float32x4_t vo = vfmaq_f32(vl, vd, valphah);
vst1q_f32(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const float32x2x2_t vw = vld2_f32(w);
w += 4;
const float32x2_t valphah = vw.val[0];
const float32x2_t valphav = vw.val[1];
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float32x2_t vtltr0 = vld1_f32(itl0);
const float32x2_t vblbr0 = vld1_f32(ibl0);
const float32x2_t vtltr1 = vld1_f32(itl1);
const float32x2_t vblbr1 = vld1_f32(ibl1);
const float32x2_t vldrd0 = vsub_f32(vblbr0, vtltr0);
const float32x2_t vldrd1 = vsub_f32(vblbr1, vtltr1);
const float32x2x2_t vld_t = vuzp_f32(vldrd0, vldrd1);
const float32x2_t vld = vld_t.val[0];
const float32x2_t vrd = vld_t.val[1];
const float32x2x2_t vtl_t = vuzp_f32(vtltr0, vtltr1);
const float32x2_t vtl = vtl_t.val[0];
const float32x2_t vtr = vtl_t.val[1];
const float32x2_t vl = vfma_f32(vtl, vld, valphav);
const float32x2_t vr = vfma_f32(vtr, vrd, valphav);
const float32x2_t vd = vsub_f32(vr, vl);
const float32x2_t vo = vfma_f32(vl, vd, valphah);
vst1_f32(output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const float32x2_t valphav = vld1_dup_f32(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float32x2_t vtltr = vld1_f32(itl);
const float32x2_t vblbr = vld1_f32(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const float32x2_t vldrd = vsub_f32(vblbr, vtltr);
const float32x2_t vlr = vfma_f32(vtltr, vldrd, valphav);
// Extract them and compute the result.
const float l = vget_lane_f32(vlr, 0);
const float r = vget_lane_f32(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 10,455 | 39.84375 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-scalar-p1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__scalar_p1(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
size_t c = channels;
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
do {
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float valphah = w[0];
const float valphav = w[1];
w += 2;
const float vtl = itl[0];
const float vtr = itl[1];
const float vbl = ibl[0];
const float vbr = ibl[1];
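      // The scalar kernel interpolates horizontally along the top and bottom rows first,
      // then blends the two rows vertically.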
const float vtd = vtr - vtl;
const float vbd = vbr - vbl;
const float vt = vtl + vtd * valphah;
const float vb = vbl + vbd * valphah;
const float vd = vb - vt;
const float vo = vt + vd * valphav;
*output++ = vo;
} while (--p != 0);
input_offset += input_increment;
c--;
} while (c != 0);
}
| 1,544 | 22.409091 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-scalar-p2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__scalar_p2(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
size_t c = channels;
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 2; p -= 2) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 2 * 2;
const float valphah0 = w[0];
const float valphav0 = w[1];
const float valphah1 = w[2];
const float valphav1 = w[3];
w += 2 * 2;
const float vtl0 = itl0[0];
const float vtr0 = itl0[1];
const float vbl0 = ibl0[0];
const float vbr0 = ibl0[1];
const float vtl1 = itl1[0];
const float vtr1 = itl1[1];
const float vbl1 = ibl1[0];
const float vbr1 = ibl1[1];
const float vtd0 = vtr0 - vtl0;
const float vbd0 = vbr0 - vbl0;
const float vtd1 = vtr1 - vtl1;
const float vbd1 = vbr1 - vbl1;
const float vt0 = vtl0 + vtd0 * valphah0;
const float vb0 = vbl0 + vbd0 * valphah0;
const float vt1 = vtl1 + vtd1 * valphah1;
const float vb1 = vbl1 + vbd1 * valphah1;
const float vd0 = vb0 - vt0;
const float vd1 = vb1 - vt1;
const float vo0 = vt0 + vd0 * valphav0;
const float vo1 = vt1 + vd1 * valphav1;
output[0] = vo0;
output[1] = vo1;
output += 2;
}
for (; p >= 1; p -= 1) {
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float valphah = w[0];
const float valphav = w[1];
w += 2;
const float vtl = itl[0];
const float vtr = itl[1];
const float vbl = ibl[0];
const float vbr = ibl[1];
const float vtd = vtr - vtl;
const float vbd = vbr - vbl;
const float vt = vtl + vtd * valphah;
const float vb = vbl + vbd * valphah;
const float vd = vb - vt;
const float vo = vt + vd * valphav;
*output++ = vo;
}
input_offset += input_increment;
c--;
} while (c != 0);
}
| 2,912 | 25.724771 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-scalar-p4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__scalar_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
size_t c = channels;
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 4 * 2;
const float valphah0 = w[0];
const float valphav0 = w[1];
const float valphah1 = w[2];
const float valphav1 = w[3];
const float valphah2 = w[4];
const float valphav2 = w[5];
const float valphah3 = w[6];
const float valphav3 = w[7];
w += 4 * 2;
const float vtl0 = itl0[0];
const float vtr0 = itl0[1];
const float vbl0 = ibl0[0];
const float vbr0 = ibl0[1];
const float vtl1 = itl1[0];
const float vtr1 = itl1[1];
const float vbl1 = ibl1[0];
const float vbr1 = ibl1[1];
const float vtl2 = itl2[0];
const float vtr2 = itl2[1];
const float vbl2 = ibl2[0];
const float vbr2 = ibl2[1];
const float vtl3 = itl3[0];
const float vtr3 = itl3[1];
const float vbl3 = ibl3[0];
const float vbr3 = ibl3[1];
const float vtd0 = vtr0 - vtl0;
const float vbd0 = vbr0 - vbl0;
const float vtd1 = vtr1 - vtl1;
const float vbd1 = vbr1 - vbl1;
const float vtd2 = vtr2 - vtl2;
const float vbd2 = vbr2 - vbl2;
const float vtd3 = vtr3 - vtl3;
const float vbd3 = vbr3 - vbl3;
const float vt0 = vtl0 + vtd0 * valphah0;
const float vb0 = vbl0 + vbd0 * valphah0;
const float vt1 = vtl1 + vtd1 * valphah1;
const float vb1 = vbl1 + vbd1 * valphah1;
const float vt2 = vtl2 + vtd2 * valphah2;
const float vb2 = vbl2 + vbd2 * valphah2;
const float vt3 = vtl3 + vtd3 * valphah3;
const float vb3 = vbl3 + vbd3 * valphah3;
const float vd0 = vb0 - vt0;
const float vd1 = vb1 - vt1;
const float vd2 = vb2 - vt2;
const float vd3 = vb3 - vt3;
const float vo0 = vt0 + vd0 * valphav0;
const float vo1 = vt1 + vd1 * valphav1;
const float vo2 = vt2 + vd2 * valphav2;
const float vo3 = vt3 + vd3 * valphav3;
output[0] = vo0;
output[1] = vo1;
output[2] = vo2;
output[3] = vo3;
output += 4;
}
for (; p >= 1; p -= 1) {
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float valphah = w[0];
const float valphav = w[1];
w += 2;
const float vtl = itl[0];
const float vtr = itl[1];
const float vbl = ibl[0];
const float vbr = ibl[1];
const float vtd = vtr - vtl;
const float vbd = vbr - vbl;
const float vt = vtl + vtd * valphah;
const float vb = vbl + vbd * valphah;
const float vd = vb - vt;
const float vo = vt + vd * valphav;
*output++ = vo;
}
input_offset += input_increment;
c--;
} while (c != 0);
}
| 4,180 | 29.079137 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-sse-p4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__sse_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const __m128 vw0 = _mm_loadu_ps(w);
const __m128 vw1 = _mm_loadu_ps(w + 4);
w += 8;
const __m128 vtltr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0);
const __m128 vblbr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0);
const __m128 vtltr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl2);
const __m128 vblbr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl2);
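      // The weight registers interleave (alpha_h, alpha_v) pairs; the shuffles below pick
      // the even lanes for alpha_h and the odd lanes for alpha_v of four output pixels.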
const __m128 valphah = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtltr01 = _mm_loadh_pi(vtltr0, (const __m64*) itl1);
const __m128 vblbr01 = _mm_loadh_pi(vblbr0, (const __m64*) ibl1);
const __m128 vtltr23 = _mm_loadh_pi(vtltr2, (const __m64*) itl3);
const __m128 vblbr23 = _mm_loadh_pi(vblbr2, (const __m64*) ibl3);
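      // Each vtltrXY/vblbrXY register now packs the (left, right) samples of two output
      // pixels from the top and bottom source rows, respectively.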
const __m128 vldrd01 = _mm_sub_ps(vblbr01, vtltr01);
const __m128 vldrd23 = _mm_sub_ps(vblbr23, vtltr23);
const __m128 vld = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));
const __m128 vd = _mm_sub_ps(vr, vl);
const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));
_mm_storeu_ps(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const __m128 vw = _mm_loadu_ps(w);
w += 4;
const __m128 valphah = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(3, 1, 3, 1));
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const __m128 vtltr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0), (const __m64*) itl1);
const __m128 vblbr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0), (const __m64*) ibl1);
const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
const __m128 vld = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));
const __m128 vd = _mm_sub_ps(vr, vl);
const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));
_mm_storel_pi((__m64*) output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const __m128 valphav = _mm_load_ps1(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const __m128 vtltr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl);
const __m128 vblbr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
const __m128 vlr = _mm_add_ps(vtltr, _mm_mul_ps(vldrd, valphav));
// Extract them and compute the result.
const float l = _mm_cvtss_f32(vlr);
const float r = _mm_cvtss_f32(_mm_shuffle_ps(vlr, vlr, 1));
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 6,247 | 38.796178 | 118 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-sse-p8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__sse_p8(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 8; p -= 8) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
i += 2 * 8;
const __m128 vw0123p0 = _mm_loadu_ps(w + 0);
const __m128 vw0123p1 = _mm_loadu_ps(w + 4);
const __m128 vw4567p0 = _mm_loadu_ps(w + 8);
const __m128 vw4567p1 = _mm_loadu_ps(w + 12);
w += 2 * 8;
const __m128 vtltr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0);
const __m128 vblbr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0);
const __m128 vtltr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl2);
const __m128 vblbr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl2);
const __m128 vtltr4 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl4);
const __m128 vblbr4 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl4);
const __m128 vtltr6 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl6);
const __m128 vblbr6 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl6);
const __m128 valphah0123 = _mm_shuffle_ps(vw0123p0, vw0123p1, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav0123 = _mm_shuffle_ps(vw0123p0, vw0123p1, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 valphah4567 = _mm_shuffle_ps(vw4567p0, vw4567p1, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav4567 = _mm_shuffle_ps(vw4567p0, vw4567p1, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtltr01 = _mm_loadh_pi(vtltr0, (const __m64*) itl1);
const __m128 vblbr01 = _mm_loadh_pi(vblbr0, (const __m64*) ibl1);
const __m128 vtltr23 = _mm_loadh_pi(vtltr2, (const __m64*) itl3);
const __m128 vblbr23 = _mm_loadh_pi(vblbr2, (const __m64*) ibl3);
const __m128 vtltr45 = _mm_loadh_pi(vtltr4, (const __m64*) itl5);
const __m128 vblbr45 = _mm_loadh_pi(vblbr4, (const __m64*) ibl5);
const __m128 vtltr67 = _mm_loadh_pi(vtltr6, (const __m64*) itl7);
const __m128 vblbr67 = _mm_loadh_pi(vblbr6, (const __m64*) ibl7);
const __m128 vldrd01 = _mm_sub_ps(vblbr01, vtltr01);
const __m128 vldrd23 = _mm_sub_ps(vblbr23, vtltr23);
const __m128 vldrd45 = _mm_sub_ps(vblbr45, vtltr45);
const __m128 vldrd67 = _mm_sub_ps(vblbr67, vtltr67);
const __m128 vld0123 = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd0123 = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vld4567 = _mm_shuffle_ps(vldrd45, vldrd67, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd4567 = _mm_shuffle_ps(vldrd45, vldrd67, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl0123 = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr0123 = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl4567 = _mm_shuffle_ps(vtltr45, vtltr67, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr4567 = _mm_shuffle_ps(vtltr45, vtltr67, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vl0123 = _mm_add_ps(vtl0123, _mm_mul_ps(vld0123, valphav0123));
const __m128 vr0123 = _mm_add_ps(vtr0123, _mm_mul_ps(vrd0123, valphav0123));
const __m128 vl4567 = _mm_add_ps(vtl4567, _mm_mul_ps(vld4567, valphav4567));
const __m128 vr4567 = _mm_add_ps(vtr4567, _mm_mul_ps(vrd4567, valphav4567));
const __m128 vd0123 = _mm_sub_ps(vr0123, vl0123);
const __m128 vd4567 = _mm_sub_ps(vr4567, vl4567);
const __m128 vo0123 = _mm_add_ps(vl0123, _mm_mul_ps(vd0123, valphah0123));
const __m128 vo4567 = _mm_add_ps(vl4567, _mm_mul_ps(vd4567, valphah4567));
_mm_storeu_ps(output + 0, vo0123);
_mm_storeu_ps(output + 4, vo4567);
output += 8;
}
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const __m128 vw0 = _mm_loadu_ps(w);
const __m128 vw1 = _mm_loadu_ps(w + 4);
w += 8;
const __m128 vtltr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0);
const __m128 vblbr0 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0);
const __m128 vtltr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl2);
const __m128 vblbr2 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl2);
const __m128 valphah = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtltr01 = _mm_loadh_pi(vtltr0, (const __m64*) itl1);
const __m128 vblbr01 = _mm_loadh_pi(vblbr0, (const __m64*) ibl1);
const __m128 vtltr23 = _mm_loadh_pi(vtltr2, (const __m64*) itl3);
const __m128 vblbr23 = _mm_loadh_pi(vblbr2, (const __m64*) ibl3);
const __m128 vldrd01 = _mm_sub_ps(vblbr01, vtltr01);
const __m128 vldrd23 = _mm_sub_ps(vblbr23, vtltr23);
const __m128 vld = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));
const __m128 vd = _mm_sub_ps(vr, vl);
const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));
_mm_storeu_ps(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const __m128 vw = _mm_loadu_ps(w);
w += 4;
const __m128 valphah = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 valphav = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(3, 1, 3, 1));
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const __m128 vtltr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0), (const __m64*) itl1);
const __m128 vblbr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0), (const __m64*) ibl1);
const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
const __m128 vld = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vrd = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vtl = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vtr = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));
const __m128 vd = _mm_sub_ps(vr, vl);
const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));
_mm_storel_pi((__m64*) output, vo);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const __m128 valphav = _mm_load_ps1(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const __m128 vtltr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl);
const __m128 vblbr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
const __m128 vlr = _mm_add_ps(vtltr, _mm_mul_ps(vldrd, valphav));
// Extract them and compute the result.
const float l = _mm_cvtss_f32(vlr);
const float r = _mm_cvtss_f32(_mm_shuffle_ps(vlr, vlr, 1));
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 11,019 | 45.893617 | 118 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-wasmsimd-p4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const v128_t vw0 = wasm_v128_load(w);
const v128_t vw1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vtltr0 = wasm_v128_load64_splat(itl0);
const v128_t vblbr0 = wasm_v128_load64_splat(ibl0);
const v128_t vtltr2 = wasm_v128_load64_splat(itl2);
const v128_t vblbr2 = wasm_v128_load64_splat(ibl2);
const v128_t vtltr01 = wasm_v128_load64_lane(itl1, vtltr0, 1);
const v128_t vblbr01 = wasm_v128_load64_lane(ibl1, vblbr0, 1);
const v128_t vtltr23 = wasm_v128_load64_lane(itl3, vtltr2, 1);
const v128_t vblbr23 = wasm_v128_load64_lane(ibl3, vblbr2, 1);
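      // wasm_v32x4_shuffle with lane patterns 0,2,4,6 and 1,3,5,7 de-interleaves the packed
      // pairs, mirroring the NEON vuzpq and SSE shuffle variants of this kernel.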
const v128_t valphah = wasm_v32x4_shuffle(vw0, vw1, 0, 2, 4, 6);
const v128_t valphav = wasm_v32x4_shuffle(vw0, vw1, 1, 3, 5, 7);
const v128_t vldrd01 = wasm_f32x4_sub(vblbr01, vtltr01);
const v128_t vldrd23 = wasm_f32x4_sub(vblbr23, vtltr23);
const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
const v128_t vrd = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);
const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);
const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
const v128_t vd = wasm_f32x4_sub(vr, vl);
const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));
wasm_v128_store(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const v128_t vw = wasm_v128_load(w);
w += 4;
const v128_t valphah = wasm_v32x4_shuffle(vw, vw, 0, 2, 0, 2);
const v128_t valphav = wasm_v32x4_shuffle(vw, vw, 1, 3, 1, 3);
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const v128_t vtltr = wasm_v128_load64_lane(itl1, wasm_v128_load64_zero(itl0), 1);
const v128_t vblbr = wasm_v128_load64_lane(ibl1, wasm_v128_load64_zero(ibl0), 1);
const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);
const v128_t vrd = wasm_v32x4_shuffle(vldrd, vldrd, 1, 3, 1, 3);
const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2);
const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3);
const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
const v128_t vd = wasm_f32x4_sub(vr, vl);
const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));
wasm_v128_store64_lane(output, vo, 0);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const v128_t valphav = wasm_v128_load32_splat(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const v128_t vtltr = wasm_v128_load64_zero(itl);
const v128_t vblbr = wasm_v128_load64_zero(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
const v128_t vlr = wasm_f32x4_add(vtltr, wasm_f32x4_mul(vldrd, valphav));
// Extract them and compute the result.
const float l = wasm_f32x4_extract_lane(vlr, 0);
const float r = wasm_f32x4_extract_lane(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 6,033 | 37.433121 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-ibilinear-chw/gen/f32-ibilinear-chw-wasmsimd-p8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-ibilinear-chw/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/ibilinear.h>
void xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8(
size_t output_pixels,
size_t channels,
const float** restrict input,
size_t input_offset,
const float* restrict weights,
float* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(float) == 0);
do {
const float** i = input;
const float* w = weights;
size_t p = output_pixels;
for (; p >= 8; p -= 8) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
i += 2 * 8;
const v128_t vw0123p0 = wasm_v128_load(w + 0);
const v128_t vw0123p1 = wasm_v128_load(w + 4);
const v128_t vw4567p0 = wasm_v128_load(w + 8);
const v128_t vw4567p1 = wasm_v128_load(w + 12);
w += 2 * 8;
const v128_t vtltr0 = wasm_v128_load64_zero(itl0);
const v128_t vblbr0 = wasm_v128_load64_zero(ibl0);
const v128_t vtltr2 = wasm_v128_load64_zero(itl2);
const v128_t vblbr2 = wasm_v128_load64_zero(ibl2);
const v128_t vtltr4 = wasm_v128_load64_zero(itl4);
const v128_t vblbr4 = wasm_v128_load64_zero(ibl4);
const v128_t vtltr6 = wasm_v128_load64_zero(itl6);
const v128_t vblbr6 = wasm_v128_load64_zero(ibl6);
const v128_t vtltr01 = wasm_v128_load64_lane(itl1, vtltr0, 1);
const v128_t vblbr01 = wasm_v128_load64_lane(ibl1, vblbr0, 1);
const v128_t vtltr23 = wasm_v128_load64_lane(itl3, vtltr2, 1);
const v128_t vblbr23 = wasm_v128_load64_lane(ibl3, vblbr2, 1);
const v128_t vtltr45 = wasm_v128_load64_lane(itl5, vtltr4, 1);
const v128_t vblbr45 = wasm_v128_load64_lane(ibl5, vblbr4, 1);
const v128_t vtltr67 = wasm_v128_load64_lane(itl7, vtltr6, 1);
const v128_t vblbr67 = wasm_v128_load64_lane(ibl7, vblbr6, 1);
const v128_t valphah0123 = wasm_v32x4_shuffle(vw0123p0, vw0123p1, 0, 2, 4, 6);
const v128_t valphav0123 = wasm_v32x4_shuffle(vw0123p0, vw0123p1, 1, 3, 5, 7);
const v128_t valphah4567 = wasm_v32x4_shuffle(vw4567p0, vw4567p1, 0, 2, 4, 6);
const v128_t valphav4567 = wasm_v32x4_shuffle(vw4567p0, vw4567p1, 1, 3, 5, 7);
const v128_t vldrd01 = wasm_f32x4_sub(vblbr01, vtltr01);
const v128_t vldrd23 = wasm_f32x4_sub(vblbr23, vtltr23);
const v128_t vldrd45 = wasm_f32x4_sub(vblbr45, vtltr45);
const v128_t vldrd67 = wasm_f32x4_sub(vblbr67, vtltr67);
const v128_t vld0123 = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
const v128_t vrd0123 = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);
const v128_t vld4567 = wasm_v32x4_shuffle(vldrd45, vldrd67, 0, 2, 4, 6);
const v128_t vrd4567 = wasm_v32x4_shuffle(vldrd45, vldrd67, 1, 3, 5, 7);
const v128_t vtl0123 = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
const v128_t vtr0123 = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);
const v128_t vtl4567 = wasm_v32x4_shuffle(vtltr45, vtltr67, 0, 2, 4, 6);
const v128_t vtr4567 = wasm_v32x4_shuffle(vtltr45, vtltr67, 1, 3, 5, 7);
const v128_t vl0123 = wasm_f32x4_add(vtl0123, wasm_f32x4_mul(vld0123, valphav0123));
const v128_t vr0123 = wasm_f32x4_add(vtr0123, wasm_f32x4_mul(vrd0123, valphav0123));
const v128_t vl4567 = wasm_f32x4_add(vtl4567, wasm_f32x4_mul(vld4567, valphav4567));
const v128_t vr4567 = wasm_f32x4_add(vtr4567, wasm_f32x4_mul(vrd4567, valphav4567));
const v128_t vd0123 = wasm_f32x4_sub(vr0123, vl0123);
const v128_t vd4567 = wasm_f32x4_sub(vr4567, vl4567);
const v128_t vo0123 = wasm_f32x4_add(vl0123, wasm_f32x4_mul(vd0123, valphah0123));
const v128_t vo4567 = wasm_f32x4_add(vl4567, wasm_f32x4_mul(vd4567, valphah4567));
wasm_v128_store(output + 0, vo0123);
wasm_v128_store(output + 4, vo4567);
output += 8;
}
for (; p >= 4; p -= 4) {
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
i += 8;
const v128_t vw0 = wasm_v128_load(w);
const v128_t vw1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vtltr0 = wasm_v128_load64_splat(itl0);
const v128_t vblbr0 = wasm_v128_load64_splat(ibl0);
const v128_t vtltr2 = wasm_v128_load64_splat(itl2);
const v128_t vblbr2 = wasm_v128_load64_splat(ibl2);
const v128_t vtltr01 = wasm_v128_load64_lane(itl1, vtltr0, 1);
const v128_t vblbr01 = wasm_v128_load64_lane(ibl1, vblbr0, 1);
const v128_t vtltr23 = wasm_v128_load64_lane(itl3, vtltr2, 1);
const v128_t vblbr23 = wasm_v128_load64_lane(ibl3, vblbr2, 1);
const v128_t valphah = wasm_v32x4_shuffle(vw0, vw1, 0, 2, 4, 6);
const v128_t valphav = wasm_v32x4_shuffle(vw0, vw1, 1, 3, 5, 7);
const v128_t vldrd01 = wasm_f32x4_sub(vblbr01, vtltr01);
const v128_t vldrd23 = wasm_f32x4_sub(vblbr23, vtltr23);
const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
const v128_t vrd = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);
const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);
const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
const v128_t vd = wasm_f32x4_sub(vr, vl);
const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));
wasm_v128_store(output, vo);
output += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const v128_t vw = wasm_v128_load(w);
w += 4;
const v128_t valphah = wasm_v32x4_shuffle(vw, vw, 0, 2, 0, 2);
const v128_t valphav = wasm_v32x4_shuffle(vw, vw, 1, 3, 1, 3);
const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
i += 4;
const v128_t vtltr = wasm_v128_load64_lane(itl1, wasm_v128_load64_zero(itl0), 1);
const v128_t vblbr = wasm_v128_load64_lane(ibl1, wasm_v128_load64_zero(ibl0), 1);
const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);
const v128_t vrd = wasm_v32x4_shuffle(vldrd, vldrd, 1, 3, 1, 3);
const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2);
const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3);
const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
const v128_t vd = wasm_f32x4_sub(vr, vl);
const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));
wasm_v128_store64_lane(output, vo, 0);
output += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
const float alphah = *w;
const v128_t valphav = wasm_v128_load32_splat(w + 1);
w += 2;
const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
i += 2;
const v128_t vtltr = wasm_v128_load64_zero(itl);
const v128_t vblbr = wasm_v128_load64_zero(ibl);
// Compute at once
// left_diff = bottom_left - top_left
// right_diff = bottom_right - top_right
const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
const v128_t vlr = wasm_f32x4_add(vtltr, wasm_f32x4_mul(vldrd, valphav));
// Extract them and compute the result.
const float l = wasm_f32x4_extract_lane(vlr, 0);
const float r = wasm_f32x4_extract_lane(vlr, 1);
*output++ = l + alphah * (r - l);
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 10,549 | 43.893617 | 90 |
c
|