repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmsimd-x86-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
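// Set up one input (a) and output (c) pointer per row of the 3-row tile.
// Rows beyond mr alias the previous row's pointers, so the redundant rows
// read and write valid memory; the stores for the real rows happen last
// and leave the correct results.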
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
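// Each tile starts from the per-column bias values that the weight packing
// places at the head of w; rows 1 and 2 copy row 0's bias-initialized accumulators.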
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
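// Main loop: consume 4 k values per iteration. Each row's 4 loaded a values
// are broadcast one lane at a time (the "splat") and multiplied against the
// two 4-wide blocks of packed weights (output columns 0-3 and 4-7).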
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
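// Remainder loop: handle the last (kc mod 4) k values one at a time using a
// 32-bit broadcast load per row.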
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
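// Clamp the accumulators to [min, max]. The pseudo-min/max ops and this
// operand order are presumably chosen so they lower to single x86 min/max
// instructions, which is what the "x86" in this variant's name refers to.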
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
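// Store the 3x8 tile. The full-width path writes all 8 columns per row and
// rewinds the a pointers by kc for the next column block; the tail path
// writes 4, 2, and/or 1 remaining columns with progressively narrower stores.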
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,236 | 36.611872 | 79 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-relu-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
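// Loadsplat variant: no unrolling over k. Each iteration broadcasts a single
// a value per row with a 32-bit splat load and accumulates with the
// relaxed-SIMD fused multiply-add builtin.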
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
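// ReLU via integer max: non-negative floats order the same as their bit
// patterns viewed as signed int32, and any negative float has the sign bit
// set (a negative int32), so i32x4.max with zero clamps negatives, including
// -0.0f, to +0.0f.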
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,342 | 28.746575 | 78 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-relu-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
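// Same splat main loop as the plain wasmsimd kernel, but every multiply-add
// goes through __builtin_wasm_relaxed_madd_f32x4, which the engine may fuse
// into a true FMA (results can differ in the last bit from separate mul+add).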
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,932 | 36.597156 | 82 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-relu-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
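// Plain WASM SIMD variant: multiply and add are separate, individually
// rounded operations, so results are reproducible across engines, unlike the
// relaxed-SIMD FMA kernels where fusing is implementation-defined.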
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,313 | 28.547945 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-relu-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
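// nc & 2: store the two low lanes with a 64-bit lane store, then shift the
// upper half down so a final nc & 1 store can write the next column.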
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,831 | 36.118483 | 79 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
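// This plain ukernel applies no activation: the raw accumulators are stored
// as-is, whereas the relu/minmax variants of the same template clamp here.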
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,976 | 27.611511 | 78 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
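// 4 k-steps x 8 output columns = 32 packed weights consumed per iteration.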
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,566 | 36.093137 | 82 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,947 | 27.402878 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,465 | 35.598039 | 79 | c |
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
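// s4 ("shuffle") scheme: instead of broadcasting one a lane at a time, the 4
// loaded a values stay in one register that is rotated by one lane between
// steps; the weights are packed to match that rotation, so after four steps
// every a element has been multiplied with all 8 of its weights.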
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
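// Remainder: a is deliberately read past kc (XNN_OOB_READS), and the packed
// weights beyond kc are zero. Before each multiply, a lanes whose weight is
// zero are zeroed as well, so garbage (possibly Inf/NaN) from the over-read
// cannot contaminate the accumulators.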
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
w += 32;
}
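    // Clamp the accumulators to the [min, max] output range supplied in the parameters.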
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
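    // Full 8-column tile: store all lanes and rewind the A pointers by kc for the next
    // block of columns; otherwise fall through to the 4/2/1-column tail stores below.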
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,922 | 42.003937 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
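    // Main loop over k in blocks of 4: the A vectors are rotated one lane between the
    // four weight groups (the "s4" shuffle scheme), and products are accumulated with
    // WAsm relaxed fused multiply-add.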
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
w += 32;
}
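    // Clamp with relaxed min/max; their NaN and signed-zero behavior is
    // implementation-defined, trading strict IEEE semantics for speed.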
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,645 | 43.62069 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
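    // This variant keeps the multiply-add as separate mul+add (no FMA) and uses the
    // relaxed operations only for the final min/max clamping.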
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,497 | 43.05364 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
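    // The "arm" variant clamps with the fully specified wasm_f32x4_max/min, whose
    // semantics match and lower to single NEON FMAX/FMIN instructions on ARM targets.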
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,278 | 42.214559 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
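    // The "x86" variant clamps with pmin/pmax (pseudo-min/max: b < a ? b : a), which
    // lower to single MINPS/MAXPS instructions on x86 instead of the longer sequence
    // required for the fully specified min/max.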
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,290 | 42.260536 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-relu-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
w += 32;
}
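    // ReLU via signed-integer max with zero: non-negative floats keep their value, and
    // negative floats (sign bit set, hence negative as int32) are replaced by +0.0f.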
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,040 | 42.640316 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-relu-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_3x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
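    // Accumulators start from the bias vectors packed at the head of w; rows 1 and 2
    // reuse row 0's copy of the bias.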
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
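      // 1-3 remaining input channels: the packed weights past kc are zeros, so
      // zero the matching activation lanes as well; this keeps values loaded past
      // the end of the row (which could be Inf/NaN) out of the accumulators.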
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
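    // ReLU: a signed integer max against zero clamps negative floats (sign bit
    // set, hence negative as int32) to +0.0f and leaves non-negative floats intact.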
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
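      // Relaxed-SIMD multiply-add: the engine may evaluate this as a fused
      // multiply-add or as a separate multiply and add; this kernel tolerates either.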
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
w += 32;
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8s4-wasmsimd.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_3x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
w += 32;
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
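      // Partial tile (nc < 8): store 4, 2, then 1 column(s), shifting the
      // remaining results into the low lanes after each store.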
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x16-minmax-aarch64-neonfma-lane-ld128.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x16__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
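    // ld128: load 4 consecutive activations per row, then broadcast each lane
    // against one 16-wide block of packed weights.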
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc0 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc0, vget_low_f32(va0), 0);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc0, vget_low_f32(va1), 0);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc0, vget_low_f32(va2), 0);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc0, vget_low_f32(va3), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc0, vget_low_f32(va0), 0);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc0, vget_low_f32(va1), 0);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc0, vget_low_f32(va2), 0);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc1 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc1, vget_low_f32(va0), 1);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc1, vget_low_f32(va1), 1);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc1, vget_low_f32(va2), 1);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc1, vget_low_f32(va3), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc1, vget_low_f32(va0), 1);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc1, vget_low_f32(va1), 1);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc1, vget_low_f32(va2), 1);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc2 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc2, vget_high_f32(va0), 0);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc2, vget_high_f32(va1), 0);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc2, vget_high_f32(va2), 0);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc2, vget_high_f32(va3), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc2, vget_high_f32(va0), 0);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc2, vget_high_f32(va1), 0);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc2, vget_high_f32(va2), 0);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc3 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc3, vget_high_f32(va0), 1);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc3, vget_high_f32(va1), 1);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc3, vget_high_f32(va2), 1);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc3, vget_high_f32(va3), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc3, vget_high_f32(va0), 1);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc3, vget_high_f32(va1), 1);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc3, vget_high_f32(va2), 1);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
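      // Remainder: handle the last 1-3 input channels one at a time with a
      // broadcast load of the activation.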
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0, vb89AB);
vacc1x89AB = vfmaq_f32(vacc1x89AB, va1, vb89AB);
vacc2x89AB = vfmaq_f32(vacc2x89AB, va2, vb89AB);
vacc3x89AB = vfmaq_f32(vacc3x89AB, va3, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0, vbCDEF);
vacc1xCDEF = vfmaq_f32(vacc1xCDEF, va1, vbCDEF);
vacc2xCDEF = vfmaq_f32(vacc2xCDEF, va2, vbCDEF);
vacc3xCDEF = vfmaq_f32(vacc3xCDEF, va3, vbCDEF);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
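      // Partial tile (nc < 16): store 8, 4, 2, then 1 column(s), shifting the
      // surviving accumulators down after each store.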
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x16-minmax-avx-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
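    // One input channel per iteration: broadcast the scalar activation across a
    // 256-bit register and multiply it by two 8-wide blocks of packed weights.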
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x16-minmax-avx512f-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemm_minmax_ukernel_4x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t k = kc;
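    // One input channel per iteration: broadcast the scalar activation and FMA
    // it against a full 16-wide row of packed weights.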
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x16-minmax-fma3-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x2_t vacc0x01 = vld1_f32(w); w += 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
size_t k = kc;
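    // Main loop: consume two K elements per iteration with fused multiply-adds.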
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb01c01 = vld1q_f32(w); w += 4;
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
#else
const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
#endif
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
#else
const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
#endif
}
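    // Remainder: kc is odd, so one trailing K element is processed with a broadcast load.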
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const float32x2_t vb01 = vld1_f32(w); w += 2;
vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
}
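    // Clamp the accumulators to the [min, max] range supplied in params.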
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 5,338 | 32.578616 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x2_t vacc0x01 = vld1_f32(w); w += 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
size_t k = kc;
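    // Main loop: consume two K elements per iteration, broadcasting each A lane with vmla_lane_f32.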
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb01c01 = vld1q_f32(w); w += 4;
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const float32x2_t vb01 = vld1_f32(w); w += 2;
vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
}
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 4,323 | 30.562044 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
w += 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
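    // Clamp the 4x2 accumulator tile to [min, max].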
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,866 | 25.854167 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_4x2__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
w += 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
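    // Clamp to [min, max] using the WebAssembly scalar min/max builtins.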
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,024 | 26.951389 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-relu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
w += 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
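    // ReLU: clamp negative accumulators to zero.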
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc20 = math_max_f32(vacc20, 0.0f);
vacc21 = math_max_f32(vacc21, 0.0f);
vacc30 = math_max_f32(vacc30, 0.0f);
vacc31 = math_max_f32(vacc31, 0.0f);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,451 | 24.954887 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-relu-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_4x2__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
w += 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
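    // ReLU: clamp negative accumulators to zero via the WebAssembly f32 max builtin.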
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f);
vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f);
vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f);
vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,529 | 25.541353 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
w += 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
w += 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
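    // No activation in this variant: the raw accumulators are stored directly.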
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,121 | 23.976 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2c4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0c4 = _mm_load_ss(w);
__m128 vacc0x1c4 = _mm_load_ss(w + 1);
__m128 vacc1x0c4 = vacc0x0c4;
__m128 vacc1x1c4 = vacc0x1c4;
__m128 vacc2x0c4 = vacc0x0c4;
__m128 vacc2x1c4 = vacc0x1c4;
__m128 vacc3x0c4 = vacc0x0c4;
__m128 vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 vb0 = _mm_loadu_ps(w);
const __m128 vb1 = _mm_loadu_ps(w + 4);
w += 8;
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(va0, vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(va0, vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(va1, vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(va1, vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(va2, vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(va2, vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(va3, vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(va3, vb1));
}
if XNN_UNLIKELY(k != 0) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const __m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128 vb0 = _mm_loadu_ps(w);
const __m128 vb1 = _mm_loadu_ps(w + 4);
w += 8;
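      // Remainder (k < 4): the packed weights are zero-padded past K, so zero out the A lanes whose
      // weight is zero to keep values read beyond K from polluting the sums.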
const __m128 vmask0 = _mm_cmpeq_ps(_mm_setzero_ps(), vb0);
const __m128 vmask1 = _mm_cmpeq_ps(_mm_setzero_ps(), vb1);
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va0), vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va0), vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va1), vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va1), vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va2), vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va2), vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va3), vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va3), vb1));
}
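    // Horizontal reduction: fold each 4-wide per-output partial sum into a scalar, packing rows (0,1) and (2,3) into pairs.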
const __m128 vacc0x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc0x0c4, vacc0x1c4), _mm_unpackhi_ps(vacc0x0c4, vacc0x1c4));
const __m128 vacc1x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc1x0c4, vacc1x1c4), _mm_unpackhi_ps(vacc1x0c4, vacc1x1c4));
const __m128 vacc2x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc2x0c4, vacc2x1c4), _mm_unpackhi_ps(vacc2x0c4, vacc2x1c4));
const __m128 vacc3x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc3x0c4, vacc3x1c4), _mm_unpackhi_ps(vacc3x0c4, vacc3x1c4));
__m128 vacc01x01 = _mm_add_ps(_mm_movelh_ps(vacc0x01c2, vacc1x01c2), _mm_movehl_ps(vacc1x01c2, vacc0x01c2));
__m128 vacc23x01 = _mm_add_ps(_mm_movelh_ps(vacc2x01c2, vacc3x01c2), _mm_movehl_ps(vacc3x01c2, vacc2x01c2));
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc01x01 = _mm_min_ps(vacc01x01, vmax);
vacc23x01 = _mm_min_ps(vacc23x01, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc01x01 = _mm_max_ps(vacc01x01, vmin);
vacc23x01 = _mm_max_ps(vacc23x01, vmin);
if XNN_LIKELY(nc >= 2) {
_mm_storel_pi((__m64*) c2, vacc23x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
_mm_storeh_pi((__m64*) c3, vacc23x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
_mm_storel_pi((__m64*) c0, vacc01x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
_mm_storeh_pi((__m64*) c1, vacc01x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
_mm_store_ss(c2, vacc23x01);
_mm_store_ss(c3, _mm_movehl_ps(vacc23x01, vacc23x01));
_mm_store_ss(c0, vacc01x01);
_mm_store_ss(c1, _mm_movehl_ps(vacc01x01, vacc01x01));
nc = 0;
}
} while (nc != 0);
}
| 5,883 | 35.09816 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
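    // Main loop: the relaxed madd may lower to either a fused or an unfused multiply-add, per the relaxed SIMD semantics.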
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
vacc01x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc23x01);
vacc01x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,606 | 36.539773 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
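    // Clamp with relaxed min/max; results for NaN or signed-zero inputs are implementation-defined.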
vacc01x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc23x01);
vacc01x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc01x01);
vacc23x01 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,554 | 36.244318 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
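    // Clamp to [min, max] with the fully-specified wasm_f32x4_min/max, the variant preferred on ARM targets.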
vacc01x01 = wasm_f32x4_max(vmin, vacc01x01);
vacc23x01 = wasm_f32x4_max(vmin, vacc23x01);
vacc01x01 = wasm_f32x4_min(vmax, vacc01x01);
vacc23x01 = wasm_f32x4_min(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,479 | 35.818182 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
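    // Clamp with pseudo-min/max (pmin/pmax), the variant preferred on x86 targets where they map to minps/maxps.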
vacc01x01 = wasm_f32x4_pmax(vmin, vacc01x01);
vacc23x01 = wasm_f32x4_pmax(vmin, vacc23x01);
vacc01x01 = wasm_f32x4_pmin(vmax, vacc01x01);
vacc23x01 = wasm_f32x4_pmin(vmax, vacc23x01);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,483 | 35.840909 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-relu-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
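    // Two-stage horizontal reduction of the c4 partial sums: after the shuffles
    // and adds, vacc01x01 holds {row 0 col 0, row 0 col 1, row 1 col 0, row 1 col 1}
    // and vacc23x01 holds the same layout for rows 2 and 3.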
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
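    // ReLU via integer max: any negative float has its sign bit set and therefore
    // compares below zero when the lanes are reinterpreted as signed 32-bit
    // integers, so i32x4_max against 0 clamps negative results to +0.0f.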
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc01x01 = wasm_i32x4_max(vacc01x01, vzero);
vacc23x01 = wasm_i32x4_max(vacc23x01, vzero);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,349 | 35.918605 | 99 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-relu-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x2c4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc01x01 = wasm_i32x4_max(vacc01x01, vzero);
vacc23x01 = wasm_i32x4_max(vacc23x01, vzero);
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,290 | 35.575581 | 96 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x2c4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(va0, vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(va1, vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(va2, vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(va3, vb1, vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask0), vb0, vacc0x0c4);
vacc0x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, vmask1), vb1, vacc0x1c4);
vacc1x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask0), vb0, vacc1x0c4);
vacc1x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, vmask1), vb1, vacc1x1c4);
vacc2x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask0), vb0, vacc2x0c4);
vacc2x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, vmask1), vb1, vacc2x1c4);
vacc3x0c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask0), vb0, vacc3x0c4);
vacc3x1c4 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, vmask1), vb1, vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,195 | 35.662722 | 99 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x2c4-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x2c4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0c4 = wasm_v128_load32_zero(w);
v128_t vacc0x1c4 = wasm_v128_load32_zero(w + 1);
v128_t vacc1x0c4 = vacc0x0c4;
v128_t vacc1x1c4 = vacc0x1c4;
v128_t vacc2x0c4 = vacc0x0c4;
v128_t vacc2x1c4 = vacc0x1c4;
v128_t vacc3x0c4 = vacc0x0c4;
v128_t vacc3x1c4 = vacc0x1c4;
w += 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb1), vacc3x1c4);
}
if XNN_UNLIKELY(k != 0) {
const v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vb0 = wasm_v128_load(w);
const v128_t vb1 = wasm_v128_load(w + 4);
w += 8;
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vmask0 = wasm_f32x4_eq(vb0, vzero);
const v128_t vmask1 = wasm_f32x4_eq(vb1, vzero);
vacc0x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask0), vb0), vacc0x0c4);
vacc0x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, vmask1), vb1), vacc0x1c4);
vacc1x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask0), vb0), vacc1x0c4);
vacc1x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, vmask1), vb1), vacc1x1c4);
vacc2x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask0), vb0), vacc2x0c4);
vacc2x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, vmask1), vb1), vacc2x1c4);
vacc3x0c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask0), vb0), vacc3x0c4);
vacc3x1c4 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, vmask1), vb1), vacc3x1c4);
}
const v128_t vacc0x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc0x0c4, vacc0x1c4, 2, 6, 3, 7));
const v128_t vacc1x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc1x0c4, vacc1x1c4, 2, 6, 3, 7));
const v128_t vacc2x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc2x0c4, vacc2x1c4, 2, 6, 3, 7));
const v128_t vacc3x01c2 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 0, 4, 1, 5),
wasm_v32x4_shuffle(vacc3x0c4, vacc3x1c4, 2, 6, 3, 7));
v128_t vacc01x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc0x01c2, vacc1x01c2, 2, 3, 6, 7));
v128_t vacc23x01 = wasm_f32x4_add(
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 0, 1, 4, 5),
wasm_v32x4_shuffle(vacc2x01c2, vacc3x01c2, 2, 3, 6, 7));
if XNN_LIKELY(nc >= 2) {
wasm_v128_store64_lane(c2, vacc23x01, 0);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
wasm_v128_store64_lane(c3, vacc23x01, 1);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
wasm_v128_store64_lane(c0, vacc01x01, 0);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
wasm_v128_store64_lane(c1, vacc01x01, 1);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
wasm_v128_store32_lane(c2, vacc23x01, 0);
wasm_v128_store32_lane(c3, vacc23x01, 2);
wasm_v128_store32_lane(c0, vacc01x01, 0);
wasm_v128_store32_lane(c1, vacc01x01, 2);
nc = 0;
}
} while (nc != 0);
}
| 6,136 | 35.313609 | 96 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x4-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc22 = math_max_f32(vacc22, vmin);
vacc23 = math_max_f32(vacc23, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc32 = math_max_f32(vacc32, vmin);
vacc33 = math_max_f32(vacc33, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc22 = math_min_f32(vacc22, vmax);
vacc23 = math_min_f32(vacc23, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
vacc32 = math_min_f32(vacc32, vmax);
vacc33 = math_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
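      // Fewer than 4 columns remain: store in halves, shifting the surviving
      // accumulators down after each partial store so the nc & 1 case always
      // writes from the column-0 variables.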
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,758 | 27.230392 | 75 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x4-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
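    // Clamp with the native WebAssembly f32.max/f32.min instructions via Clang
    // builtins rather than the portable math_max_f32/math_min_f32 helpers.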
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
vacc33 = __builtin_wasm_max_f32(vacc33, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
vacc33 = __builtin_wasm_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 6,076 | 28.789216 | 75 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x4-relu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc12 = math_max_f32(vacc12, 0.0f);
vacc13 = math_max_f32(vacc13, 0.0f);
vacc20 = math_max_f32(vacc20, 0.0f);
vacc21 = math_max_f32(vacc21, 0.0f);
vacc22 = math_max_f32(vacc22, 0.0f);
vacc23 = math_max_f32(vacc23, 0.0f);
vacc30 = math_max_f32(vacc30, 0.0f);
vacc31 = math_max_f32(vacc31, 0.0f);
vacc32 = math_max_f32(vacc32, 0.0f);
vacc33 = math_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,015 | 26.113514 | 73 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x4-relu-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f);
vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f);
vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f);
vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f);
vacc22 = __builtin_wasm_max_f32(vacc22, 0.0f);
vacc23 = __builtin_wasm_max_f32(vacc23, 0.0f);
vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f);
vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f);
vacc32 = __builtin_wasm_max_f32(vacc32, 0.0f);
vacc33 = __builtin_wasm_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,173 | 26.967568 | 73 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x4-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,357 | 24.786982 | 76 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-aarch64-neonfma-lane-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
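    // Main loop: load 4 A values per row at once (128-bit load) and consume them
    // with lane-indexed FMAs against 4 consecutive groups of packed B vectors.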
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,792 | 37.735683 | 79 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,955 | 34.489796 | 75 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-avx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w += 8;
size_t k = kc;
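    // Main loop over K: broadcast one element of each A row and accumulate it against 8 packed B columns; plain AVX has no FMA, so each step is a separate multiply and add.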
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,120 | 30.22561 | 85 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-fma3-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w += 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
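      // With FMA3, each accumulate is a single fused multiply-add (one rounding), unlike the separate mul+add in the AVX variant above.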
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,069 | 29.914634 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neon-dup-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
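      // Remainder: fewer than 4 K elements are left; process them one scalar of A at a time.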
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,252 | 37.078189 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neon-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
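    // Clamp the accumulators to the activation range carried in params before storing.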
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,295 | 34.764706 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neon-lane-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neon_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
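      // The A pointers were advanced by kc bytes in the K loop; rewind them so the next 8-column tile reuses the same rows.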
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
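      // Fewer than 8 columns remain: store them in chunks of 4, 2 and 1, shifting the upper halves down between steps.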
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,781 | 37.687225 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
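      // vmlaq_lane_f32 multiplies B by a single lane of the 2-element A vector, so no explicit dup is needed.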
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
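      // Full 8-column tile: rows are stored highest-first (c3..c0) and each C pointer then advances by cn_stride.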
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,944 | 34.433673 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neonfma-dup-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
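      // The K loop is unrolled by 4: each iteration loads 4 A elements per row and 4 pairs of B vectors, duplicating one A lane per slice.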
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(vget_low_f32(va0), 0);
const float32x4_t va1c0 = vdupq_lane_f32(vget_low_f32(va1), 0);
const float32x4_t va2c0 = vdupq_lane_f32(vget_low_f32(va2), 0);
const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c1 = vdupq_lane_f32(vget_low_f32(va0), 1);
const float32x4_t va1c1 = vdupq_lane_f32(vget_low_f32(va1), 1);
const float32x4_t va2c1 = vdupq_lane_f32(vget_low_f32(va2), 1);
const float32x4_t va3c1 = vdupq_lane_f32(vget_low_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t va0c2 = vdupq_lane_f32(vget_high_f32(va0), 0);
const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
const float32x4_t va3c2 = vdupq_lane_f32(vget_high_f32(va3), 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c2, vb0123c2);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c2, vb0123c2);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c2, vb4567c2);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c2, vb4567c2);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
const float32x4_t va1c3 = vdupq_lane_f32(vget_high_f32(va1), 1);
const float32x4_t va2c3 = vdupq_lane_f32(vget_high_f32(va2), 1);
const float32x4_t va3c3 = vdupq_lane_f32(vget_high_f32(va3), 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c3, vb0123c3);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c3, vb0123c3);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c3, vb0123c3);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c3, vb4567c3);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c3, vb4567c3);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c3, vb4567c3);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,255 | 37.090535 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-neonfma-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,298 | 34.779412 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-sse-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
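      // SSE has no broadcast-from-register multiply, so each A lane is duplicated with a shuffle before the multiply.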
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
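// A minimal standalone sketch of the nc-tail store pattern used by the SSE
// kernels in this family (the nc & 4 / nc & 2 / nc & 1 branches above).
// The names store_tail and out are illustrative only, not XNNPACK API, and
// the helper assumes 1 <= n <= 7 (the full-panel case is handled separately).
#include <stddef.h>
#include <xmmintrin.h>
static void store_tail(float* out, __m128 vlo, __m128 vhi, size_t n) {
  if (n & 4) {
    _mm_storeu_ps(out, vlo); // write lanes 0-3
    vlo = vhi; // shift the upper four lanes down
    out += 4;
  }
  if (n & 2) {
    _mm_storel_pi((__m64*) out, vlo); // write two lanes
    vlo = _mm_movehl_ps(vlo, vlo); // move the high pair into the low pair
    out += 2;
  }
  if (n & 1) {
    _mm_store_ss(out, vlo); // write the final lane
  }
}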
| 9,991 | 36.70566 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-sse-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
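// A plain-C reference sketch of what xnn_f32_gemm_minmax_ukernel_4x8__sse_load1
// above computes for a single panel of up to 8 output columns. The packed-weight
// layout assumed here (8 bias floats, then 8 weight floats per k step) is
// inferred from the loads in the kernel; the function and parameter names are
// illustrative, not part of XNNPACK's API.
#include <stddef.h>
static void gemm_4x8_minmax_reference(
    size_t mr,                          // number of rows, <= 4
    size_t nc,                          // columns in this panel, <= 8
    size_t kc,                          // reduction length in bytes, as in the kernel
    const float* a, size_t a_stride,    // row stride in bytes
    const float* w,                     // packed: 8 biases, then 8 weights per k
    float* c, size_t cm_stride,         // row stride in bytes
    float min, float max)
{
  const size_t k_elements = kc / sizeof(float);
  for (size_t m = 0; m < mr; m++) {
    const float* a_row = (const float*) ((const char*) a + m * a_stride);
    float* c_row = (float*) ((char*) c + m * cm_stride);
    for (size_t n = 0; n < nc; n++) {
      float acc = w[n];  // bias
      for (size_t k = 0; k < k_elements; k++) {
        acc += a_row[k] * w[8 + 8 * k + n];
      }
      // clamp to [min, max], matching the vmax/vmin steps in the kernel
      if (acc > max) acc = max;
      if (acc < min) acc = min;
      c_row[n] = acc;
    }
  }
}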
| 5,480 | 29.281768 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,400 | 38.249057 | 114 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
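// The wasmrelaxedsimd FMA kernel above differs from the plain wasmsimd variants
// elsewhere in this collection only in how it accumulates and clamps. A minimal
// sketch of the two accumulation forms, using only intrinsics that appear
// verbatim in these kernels (building it requires a clang toolchain with
// relaxed-SIMD support); the helper names are illustrative:
#include <wasm_simd128.h>
static inline v128_t accumulate_plain(v128_t va, v128_t vb, v128_t vacc) {
  // Plain WAsm SIMD: separate multiply and add, i.e. two rounding steps.
  return wasm_f32x4_add(wasm_f32x4_mul(va, vb), vacc);
}
static inline v128_t accumulate_relaxed(v128_t va, v128_t vb, v128_t vacc) {
  // Relaxed SIMD: the engine may fuse this into a single FMA (one rounding),
  // so results can differ slightly from the plain form on some targets.
  return __builtin_wasm_relaxed_madd_f32x4(va, vb, vacc);
}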
| 6,164 | 33.060773 | 78 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,762 | 40.396154 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmrelaxedsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,136 | 32.906077 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmrelaxedsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,638 | 39.919231 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmsimd-arm-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,845 | 31.298343 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmsimd-arm-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,347 | 38.8 | 79 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmsimd-x86-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,861 | 31.38674 | 75 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-minmax-wasmsimd-x86-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,363 | 38.861538 | 79 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-relu-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,387 | 30.508772 | 78 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-relu-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,985 | 38.944 | 82 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-relu-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,352 | 30.304094 | 75 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-relu-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,854 | 38.42 | 79 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,917 | 29.358025 | 78 | c | XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
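    // Remainder: fewer than four K values are left, so fall back to broadcasting one A element per row per iteration.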
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
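    // Write out the 4x8 output tile: full 8-column stores when nc >= 8, otherwise 4/2/1-column tails.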
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,515 | 38.485477 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
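    // Inner loop over K: broadcast a single A value per row (load-splat) and accumulate it against eight packed B values.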
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,882 | 29.141975 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
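    // Main loop: load four A values per row at once, then splat each lane in turn against the corresponding 4x8 slice of packed B.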
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
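    // Handle the trailing 1-3 K values with per-element broadcasts.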
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
k -= sizeof(float);
} while (k != 0);
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,384 | 37.941909 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
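    // Main loop: the s4 schedule rotates each A register by one lane between the four sub-steps instead of splatting individual lanes.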
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
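    // Remainder: A is loaded full-width (out-of-bounds reads are tolerated); lanes where the zero-padded B weights are zero are masked off so stray values past the end of the row cannot reach the accumulators.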
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
w += 32;
}
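    // Clamp the accumulators to the [min, max] output range before storing.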
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 13,883 | 44.821782 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
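    // Main loop: relaxed fused multiply-add accumulation, rotating the A registers one lane between sub-steps.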
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
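    // Remainder: mask off A lanes where the zero-padded B weights are zero before the fused multiply-adds.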
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
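    // Clamp the accumulators to the [min, max] output range using relaxed min/max.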
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,797 | 46.735484 | 130 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
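    // Main loop: separate multiply and add (no fused multiply-add), with the same rotate-by-one-lane s4 schedule.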
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,601 | 46.103226 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
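  // Rows beyond mr alias the previous row's pointers, so the kernel always computes a full 4-row tile.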
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
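      // "s4" scheme: rotate each activation vector left by one lane so the next
      // group of packed weights (c1, c2, c3) multiplies the next activation element.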
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
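      // The packed weights are expected to be zero-padded past kc; lanes of the
      // partially valid va vectors that line up with zero weights are masked off
      // so garbage values (and 0*NaN products) never reach the accumulators.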
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
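    // The "arm" variant clamps with the full IEEE f32x4.min/max operations,
    // which lower to single instructions on ARM targets.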
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-minmax-wasmsimd-x86.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_4x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
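    // The "x86" variant clamps with pmin/pmax, which map directly to SSE
    // minps/maxps and avoid the extra NaN-handling sequence that the full
    // f32x4.min/max require on x86.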
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-relu-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
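      // Relaxed madd may or may not fuse the multiply and add into a single
      // rounding step, so results can differ slightly from the mul+add kernels.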
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
const v128_t vzero = wasm_i32x4_const_splat(0);
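    // ReLU via signed-integer max: non-negative floats keep their bit pattern,
    // while negative floats (sign bit set) compare below zero as i32 and are
    // replaced with +0.0f. (Holds for non-NaN inputs.)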
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-relu-wasmsimd.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_4x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
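      // kc is measured in bytes, so rewinding the a pointers by kc returns each
      // row pointer to the start of its activations for the next column tile.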
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-wasmrelaxedsimd-fma.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567c3, vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc3x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc3x4567);
w += 32;
}
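    // Store the 4x8 output tile; the else branch below handles the case of fewer than 8 remaining columns.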
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,550 | 45.56701 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-4x8s4-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_4x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
w += 8;
size_t k = kc;
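    // Main loop: each iteration consumes 4 columns of K. The A vectors are rotated by
    // one lane between the four sub-steps so every A element meets its matching group
    // of packed B weights.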
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
v128_t va1 = wasm_v128_load(a1);
a1 += 4;
v128_t va2 = wasm_v128_load(a2);
a2 += 4;
v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567c3), vacc3x4567);
w += 32;
k -= 4 * sizeof(float);
}
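    // Remainder loop for the last 1-3 columns of K: A lanes whose packed weight is
    // zero (K padding) are masked to zero before the multiply-add.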
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
v128_t va1 = wasm_v128_load(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
v128_t va2 = wasm_v128_load(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
v128_t va3 = wasm_v128_load(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc3x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc3x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc3x4567);
w += 32;
}
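    // Write out the 4x8 tile, or the partial tile when fewer than 8 columns remain.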
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,347 | 44.869416 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x16-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
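    // Inner loop: broadcast one A element per row and accumulate against 16 packed
    // weights (two 8-wide halves) using separate multiply and add (no FMA).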
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
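    // Clamp the accumulators to [min, max] before storing.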
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,189 | 34.301724 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w += 16;
size_t k = kc;
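    // Inner loop: one FMA per row per column of K, broadcasting a single A element
    // against a 16-wide vector of packed weights.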
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 5,178 | 33.993243 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x16-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
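    // Broadcast inner loop: one A element per row fused-multiplied against two 8-wide
    // vectors of packed weights per iteration.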
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,060 | 33.74569 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
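    // Main loop: load 2 columns of K per row and use lane-indexed FMA so each A
    // element is multiplied against the packed weights without an explicit broadcast.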
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
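    // Odd-K remainder: one broadcast load per row and a plain vector FMA.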
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,310 | 35.774336 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w += 8;
size_t k = kc;
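    // Inner loop: broadcast one A element per row against 8 packed weights per
    // iteration, using separate multiply and add (no FMA).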
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,102 | 31.636364 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w += 8;
size_t k = kc;
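    // Broadcast inner loop over single columns of K: one fused multiply-add per row.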
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,038 | 31.294118 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
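    // Main loop: 2 columns of K per iteration using lane-indexed vmla (multiply-accumulate
    // without fusion) against the packed weights.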
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
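    // Remainder: for odd kc, accumulate the final K element via a duplicated scalar load.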
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
}
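    // Clamp the accumulators to the [min, max] range supplied in params.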
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
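    // Store the 5x8 output tile; narrower tiles fall through to 4-, 2- and 1-column stores.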
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,299 | 35.725664 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
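    // Main loop: unroll K by 4; each A lane is broadcast with _mm_shuffle_ps and
    // multiplied against two 4-wide panels of B.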
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va3c0000 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va4c0000 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va3c1111 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va4c1111 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va3c2222 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va4c2222 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
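    // Remainder: accumulate leftover K elements one at a time with _mm_load1_ps broadcasts.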
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
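    // Clamp the accumulators to the output range loaded from params->sse.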
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,044 | 38.362745 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-sse-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
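    // Inner loop: one K element per iteration; each A value is broadcast with
    // _mm_load1_ps and accumulated with a separate multiply and add.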
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
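    // Clamp the accumulators to the output range loaded from params->sse.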
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,543 | 30.461538 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-sse2-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
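    // Main loop: unroll K by 4; broadcasts use _mm_shuffle_epi32 in the integer
    // domain for the first three lanes and _mm_shuffle_ps for the last one.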
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,555 | 40.03268 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
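    // Inner loop: one K element per iteration, broadcast with wasm_v128_load32_splat
    // and accumulated with relaxed fused multiply-add.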
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
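    // Clamp with relaxed min/max against the limits splatted from params->wasmsimd.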
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,380 | 34.485577 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmrelaxedsimd-fma-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
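    // Main loop: unroll K by 4; A lanes are broadcast with wasm_v32x4_shuffle and
    // accumulated with relaxed fused multiply-add.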
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb0123c0, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb0123c0, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c0, vb4567c0, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c0, vb4567c0, vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb0123c1, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb0123c1, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c1, vb4567c1, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c1, vb4567c1, vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb0123c2, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb0123c2, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c2, vb4567c2, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c2, vb4567c2, vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb0123c3, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb0123c3, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3c3, vb4567c3, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4c3, vb4567c3, vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
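    // Remainder: accumulate leftover K elements one at a time from splatted scalar loads.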
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,986 | 42.146179 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmrelaxedsimd-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
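    // Inner loop: one K element per iteration; multiply-add is expressed as separate
    // wasm_f32x4_mul and wasm_f32x4_add (no FMA in this variant).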
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,346 | 34.322115 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmrelaxedsimd-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
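    // Main loop: consume K in blocks of 4, splatting each lane of the A vectors and accumulating it against two packed B vectors.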
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
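    // Remainder loop: handle the last kc % 4 elements one broadcast A value at a time.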
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
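    // Clamp the accumulators to the [min, max] output range with relaxed SIMD min/max.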
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
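    // Write the 5x8 tile; the else branch stores partial tiles of 4, 2, and 1 columns.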
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
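// Illustrative call sketch (not part of the generated kernel): one way this
// ukernel could be invoked for a dense row-major problem, assuming `w` points
// to weights packed the way the kernel reads them (8 bias floats, then 8
// filter floats per K element) and that all strides are given in bytes.
//
//   union xnn_f32_minmax_params params;  // .wasmsimd.min/.max set by the caller
//   xnn_f32_gemm_minmax_ukernel_5x8__wasmrelaxedsimd_splat(
//       /*mr=*/5, /*nc=*/n, /*kc=*/k * sizeof(float),
//       a, /*a_stride=*/k * sizeof(float),
//       w, c, /*cm_stride=*/n * sizeof(float), /*cn_stride=*/8 * sizeof(float),
//       &params);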
| 12,832 | 41.634551 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmsimd-arm-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
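    // Accumulate one K element per iteration: broadcast each A value and multiply-add it against two packed B vectors per row.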
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
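    // Clamp the accumulators to the [min, max] output range (wasm_f32x4_min/max lower well on ARM).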
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
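    // Store the full 5x8 output tile, or fall through to the partial-tile path when fewer than 8 columns remain.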
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,983 | 32.576923 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmsimd-arm-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
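    // Main loop: process 4 K elements per iteration by splatting each lane of the A vectors against the packed B vectors.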
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
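    // Remainder loop for the final kc % 4 elements, one scalar of A at a time.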
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
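    // Clamp the accumulators to the [min, max] output range.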
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_max(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_max(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_max(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_max(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_min(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_min(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_min(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_min(vmax, vacc4x4567);
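    // Store the 5x8 tile; partial tiles of 4, 2, and 1 columns are handled below.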
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,469 | 40.428571 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmsimd-x86-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
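    // Accumulate one K element per iteration, broadcasting each A value against two packed B vectors per row.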
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123));
vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567));
vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123));
vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
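    // Clamp the accumulators to [min, max] with pmin/pmax, which map to single MINPS/MAXPS instructions on x86.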
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
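    // Store the full 5x8 output tile, or fall through to the partial-tile stores below.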
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,003 | 32.673077 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-minmax-wasmsimd-x86-splat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
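    // Main loop: consume K in blocks of 4, splatting each lane of the A vectors against the packed B vectors.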
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va3 = wasm_v128_load(a3);
a3 += 4;
const v128_t va4 = wasm_v128_load(a4);
a4 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb0123c0), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb0123c0), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c0, vb4567c0), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c0, vb4567c0), vacc4x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t va3c1 = wasm_v32x4_shuffle(va3, va3, 1, 1, 1, 1);
const v128_t va4c1 = wasm_v32x4_shuffle(va4, va4, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb0123c1), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb0123c1), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c1, vb4567c1), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c1, vb4567c1), vacc4x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t va3c2 = wasm_v32x4_shuffle(va3, va3, 2, 2, 2, 2);
const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb0123c2), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb0123c2), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c2, vb4567c2), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c2, vb4567c2), vacc4x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t va3c3 = wasm_v32x4_shuffle(va3, va3, 3, 3, 3, 3);
const v128_t va4c3 = wasm_v32x4_shuffle(va4, va4, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb0123c3), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb0123c3), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3c3, vb4567c3), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4c3, vb4567c3), vacc4x4567);
w += 32;
k -= 4 * sizeof(float);
}
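    // Remainder loop: handle the last kc % 4 elements one broadcast A value at a time.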
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb0123), vacc3x0123);
vacc4x0123 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb0123), vacc4x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(va3, vb4567), vacc3x4567);
vacc4x4567 = wasm_f32x4_add(wasm_f32x4_mul(va4, vb4567), vacc4x4567);
k -= sizeof(float);
} while (k != 0);
}
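    // Clamp the accumulators to [min, max] with pmin/pmax, which lower to single MINPS/MAXPS instructions on x86.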
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmax(vmin, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmax(vmin, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmax(vmin, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmax(vmin, vacc4x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc3x0123 = wasm_f32x4_pmin(vmax, vacc3x0123);
vacc4x0123 = wasm_f32x4_pmin(vmax, vacc4x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
vacc3x4567 = wasm_f32x4_pmin(vmax, vacc3x4567);
vacc4x4567 = wasm_f32x4_pmin(vmax, vacc4x4567);
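    // Write the 5x8 tile; the else branch stores partial tiles of 4, 2, and 1 columns.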
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,489 | 40.495017 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-5x8-relu-wasmrelaxedsimd-fma-loadsplat.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_5x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
v128_t vacc3x0123 = vacc0x0123;
v128_t vacc3x4567 = vacc0x4567;
v128_t vacc4x0123 = vacc0x0123;
v128_t vacc4x4567 = vacc0x4567;
w += 8;
size_t k = kc;
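    // Accumulate one K element per iteration using relaxed SIMD fused multiply-add.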
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t va3 = wasm_v128_load32_splat(a3);
a3 += 1;
const v128_t va4 = wasm_v128_load32_splat(a4);
a4 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
vacc3x0123 = __builtin_wasm_relaxed_madd_f32x4(va3, vb0123, vacc3x0123);
vacc3x4567 = __builtin_wasm_relaxed_madd_f32x4(va3, vb4567, vacc3x4567);
vacc4x0123 = __builtin_wasm_relaxed_madd_f32x4(va4, vb0123, vacc4x0123);
vacc4x4567 = __builtin_wasm_relaxed_madd_f32x4(va4, vb4567, vacc4x4567);
k -= sizeof(float);
} while (k != 0);
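    // Apply the ReLU activation with an integer max against zero, which clears every lane whose sign bit is set.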
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
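    // Store the full 5x8 output tile, or fall through to the partial-tile stores below.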
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c4, vacc4x0123);
wasm_v128_store(c3, vacc3x0123);
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c4, vacc4x0123, 0);
wasm_v128_store64_lane(c3, vacc3x0123, 0);
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc4x0123 = wasm_v64x2_shuffle(vacc4x0123, vacc4x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c4, vacc4x0123, 0);
wasm_v128_store32_lane(c3, vacc3x0123, 0);
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,431 | 31.816327 | 78 |
c
|