repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-avx-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/avx.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__avx_x16_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vt0 = _mm256_loadu_ps(input);
const __m256 vt1 = _mm256_loadu_ps(input + 8);
input += 16;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vt = _mm256_loadu_ps(input);
input += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m256 vt = _mm256_maskload_ps(input, vmask);
vacc0 = _mm256_add_ps(vacc0, vt);
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
_mm_store_ss(output, vacc);
}
| 1,829 | 30.016949 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-avx-x24-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/avx.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__avx_x24_acc3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vt0 = _mm256_loadu_ps(input);
const __m256 vt1 = _mm256_loadu_ps(input + 8);
const __m256 vt2 = _mm256_loadu_ps(input + 16);
input += 24;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc2 = _mm256_add_ps(vacc2, vt2);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vt = _mm256_loadu_ps(input);
input += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m256 vt = _mm256_maskload_ps(input, vmask);
vacc0 = _mm256_add_ps(vacc0, vt);
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
_mm_store_ss(output, vacc);
}
| 1,997 | 30.714286 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-avx-x32-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/avx.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__avx_x32_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vt0 = _mm256_loadu_ps(input);
const __m256 vt1 = _mm256_loadu_ps(input + 8);
const __m256 vt2 = _mm256_loadu_ps(input + 16);
const __m256 vt3 = _mm256_loadu_ps(input + 24);
input += 32;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc0 = _mm256_add_ps(vacc0, vt2);
vacc1 = _mm256_add_ps(vacc1, vt3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vt = _mm256_loadu_ps(input);
input += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m256 vt = _mm256_maskload_ps(input, vmask);
vacc0 = _mm256_add_ps(vacc0, vt);
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
_mm_store_ss(output, vacc);
}
| 2,011 | 30.936508 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-avx-x32-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/avx.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__avx_x32_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vt0 = _mm256_loadu_ps(input);
const __m256 vt1 = _mm256_loadu_ps(input + 8);
const __m256 vt2 = _mm256_loadu_ps(input + 16);
const __m256 vt3 = _mm256_loadu_ps(input + 24);
input += 32;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc2 = _mm256_add_ps(vacc2, vt2);
vacc3 = _mm256_add_ps(vacc3, vt3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vt = _mm256_loadu_ps(input);
input += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m256 vt = _mm256_maskload_ps(input, vmask);
vacc0 = _mm256_add_ps(vacc0, vt);
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
_mm_store_ss(output, vacc);
}
| 2,165 | 31.328358 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/avx.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vt = _mm256_loadu_ps(input);
input += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m256 vt = _mm256_maskload_ps(input, vmask);
vacc0 = _mm256_add_ps(vacc0, vt);
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
_mm_store_ss(output, vacc);
}
| 1,479 | 29.204082 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-neon-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__neon_x12_acc3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vt0 = vld1q_f32(input); input += 4;
const float32x4_t vt1 = vld1q_f32(input); input += 4;
const float32x4_t vt2 = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc0 = vaddq_f32(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vt = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const float32x2_t vt = vld1_f32(input); input += 2;
vacc = vadd_f32(vacc, vt);
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const float32x2_t vt = vld1_dup_f32(input);
vacc = vadd_f32(vacc, vt);
}
vacc = vmul_f32(vacc, vscale);
vst1_lane_f32(output, vacc, 0);
}
| 1,886 | 29.934426 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-neon-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__neon_x16_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vt0 = vld1q_f32(input); input += 4;
const float32x4_t vt1 = vld1q_f32(input); input += 4;
const float32x4_t vt2 = vld1q_f32(input); input += 4;
const float32x4_t vt3 = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc0 = vaddq_f32(vacc0, vt2);
vacc1 = vaddq_f32(vacc1, vt3);
}
vacc0 = vaddq_f32(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vt = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const float32x2_t vt = vld1_f32(input); input += 2;
vacc = vadd_f32(vacc, vt);
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const float32x2_t vt = vld1_dup_f32(input);
vacc = vadd_f32(vacc, vt);
}
vacc = vmul_f32(vacc, vscale);
vst1_lane_f32(output, vacc, 0);
}
| 1,903 | 30.213115 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-neon-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__neon_x16_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vt0 = vld1q_f32(input); input += 4;
const float32x4_t vt1 = vld1q_f32(input); input += 4;
const float32x4_t vt2 = vld1q_f32(input); input += 4;
const float32x4_t vt3 = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
vacc3 = vaddq_f32(vacc3, vt3);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vt = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const float32x2_t vt = vld1_f32(input); input += 2;
vacc = vadd_f32(vacc, vt);
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const float32x2_t vt = vld1_dup_f32(input);
vacc = vadd_f32(vacc, vt);
}
vacc = vmul_f32(vacc, vscale);
vst1_lane_f32(output, vacc, 0);
}
| 2,055 | 30.630769 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vt = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const float32x2_t vt = vld1_f32(input); input += 2;
vacc = vadd_f32(vacc, vt);
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const float32x2_t vt = vld1_dup_f32(input);
vacc = vadd_f32(vacc, vt);
}
vacc = vmul_f32(vacc, vscale);
vst1_lane_f32(output, vacc, 0);
}
| 1,375 | 27.666667 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-neon-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__neon_x8_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vt0 = vld1q_f32(input); input += 4;
const float32x4_t vt1 = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
}
vacc0 = vaddq_f32(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vt = vld1q_f32(input); input += 4;
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const float32x2_t vt = vld1_f32(input); input += 2;
vacc = vadd_f32(vacc, vt);
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const float32x2_t vt = vld1_dup_f32(input);
vacc = vadd_f32(vacc, vt);
}
vacc = vmul_f32(vacc, vscale);
vst1_lane_f32(output, vacc, 0);
}
| 1,714 | 29.087719 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float vacc0 = 0.0f;
do {
const float vt = *input++;
vacc0 += vt;
batch -= sizeof(float);
} while (batch != 0);
const float vscale = params->scalar.scale;
vacc0 *= vscale;
*output = vacc0;
}
| 854 | 22.108108 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-scalar-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__scalar_x2_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float vacc0 = 0.0f;
float vacc1 = 0.0f;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vt0 = input[0];
const float vt1 = input[1];
input += 2;
vacc0 += vt0;
vacc1 += vt1;
}
vacc0 += vacc1;
if XNN_UNLIKELY(batch != 0) {
const float vt = *input;
vacc0 += vt;
}
const float vscale = params->scalar.scale;
vacc0 *= vscale;
*output = vacc0;
}
| 1,063 | 21.638298 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-scalar-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__scalar_x3_acc3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float vacc0 = 0.0f;
float vacc1 = 0.0f;
float vacc2 = 0.0f;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
const float vt0 = input[0];
const float vt1 = input[1];
const float vt2 = input[2];
input += 3;
vacc0 += vt0;
vacc1 += vt1;
vacc2 += vt2;
}
vacc0 += vacc1;
vacc0 += vacc2;
if XNN_UNLIKELY(batch != 0) {
do {
const float vt = *input++;
vacc0 += vt;
batch -= sizeof(float);
} while (batch != 0);
}
const float vscale = params->scalar.scale;
vacc0 *= vscale;
*output = vacc0;
}
| 1,224 | 21.685185 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-scalar-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__scalar_x4_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float vacc0 = 0.0f;
float vacc1 = 0.0f;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vt0 = input[0];
const float vt1 = input[1];
const float vt2 = input[2];
const float vt3 = input[3];
input += 4;
vacc0 += vt0;
vacc1 += vt1;
vacc0 += vt2;
vacc1 += vt3;
}
vacc0 += vacc1;
if XNN_UNLIKELY(batch != 0) {
do {
const float vt = *input++;
vacc0 += vt;
batch -= sizeof(float);
} while (batch != 0);
}
const float vscale = params->scalar.scale;
vacc0 *= vscale;
*output = vacc0;
}
| 1,234 | 21.87037 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-scalar-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__scalar_x4_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
float vacc0 = 0.0f;
float vacc1 = 0.0f;
float vacc2 = 0.0f;
float vacc3 = 0.0f;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vt0 = input[0];
const float vt1 = input[1];
const float vt2 = input[2];
const float vt3 = input[3];
input += 4;
vacc0 += vt0;
vacc1 += vt1;
vacc2 += vt2;
vacc3 += vt3;
}
vacc0 += vacc1;
vacc2 += vacc3;
vacc0 += vacc2;
if XNN_UNLIKELY(batch != 0) {
do {
const float vt = *input++;
vacc0 += vt;
batch -= sizeof(float);
} while (batch != 0);
}
const float vscale = params->scalar.scale;
vacc0 *= vscale;
*output = vacc0;
}
| 1,314 | 21.672414 | 74 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-sse-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__sse_x12_acc3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
__m128 vacc2 = _mm_setzero_ps();
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vt0 = _mm_loadu_ps(input);
const __m128 vt1 = _mm_loadu_ps(input + 4);
const __m128 vt2 = _mm_loadu_ps(input + 8);
input += 12;
vacc0 = _mm_add_ps(vacc0, vt0);
vacc1 = _mm_add_ps(vacc1, vt1);
vacc2 = _mm_add_ps(vacc2, vt2);
}
vacc0 = _mm_add_ps(vacc0, vacc1);
vacc0 = _mm_add_ps(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vt = _mm_loadu_ps(input);
input += 4;
vacc0 = _mm_add_ps(vacc0, vt);
}
vacc0 = _mm_add_ps(vacc0, _mm_movehl_ps(vacc0, vacc0));
if XNN_UNLIKELY(batch != 0) {
do {
const __m128 vt = _mm_load_ss(input);
input += 1;
vacc0 = _mm_add_ss(vacc0, vt);
batch -= sizeof(float);
} while (batch != 0);
}
vacc0 = _mm_add_ss(vacc0, _mm_shuffle_ps(vacc0, vacc0, _MM_SHUFFLE(1, 1, 1, 1)));
vacc0 = _mm_mul_ss(vacc0, _mm_load_ss(¶ms->scalar.scale));
_mm_store_ss(output, vacc0);
}
| 1,786 | 27.365079 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-sse-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__sse_x16_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vt0 = _mm_loadu_ps(input);
const __m128 vt1 = _mm_loadu_ps(input + 4);
const __m128 vt2 = _mm_loadu_ps(input + 8);
const __m128 vt3 = _mm_loadu_ps(input + 12);
input += 16;
vacc0 = _mm_add_ps(vacc0, vt0);
vacc1 = _mm_add_ps(vacc1, vt1);
vacc0 = _mm_add_ps(vacc0, vt2);
vacc1 = _mm_add_ps(vacc1, vt3);
}
vacc0 = _mm_add_ps(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vt = _mm_loadu_ps(input);
input += 4;
vacc0 = _mm_add_ps(vacc0, vt);
}
vacc0 = _mm_add_ps(vacc0, _mm_movehl_ps(vacc0, vacc0));
if XNN_UNLIKELY(batch != 0) {
do {
const __m128 vt = _mm_load_ss(input);
input += 1;
vacc0 = _mm_add_ss(vacc0, vt);
batch -= sizeof(float);
} while (batch != 0);
}
vacc0 = _mm_add_ss(vacc0, _mm_shuffle_ps(vacc0, vacc0, _MM_SHUFFLE(1, 1, 1, 1)));
vacc0 = _mm_mul_ss(vacc0, _mm_load_ss(¶ms->scalar.scale));
_mm_store_ss(output, vacc0);
}
| 1,800 | 27.587302 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-sse-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__sse_x16_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
__m128 vacc2 = _mm_setzero_ps();
__m128 vacc3 = _mm_setzero_ps();
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vt0 = _mm_loadu_ps(input);
const __m128 vt1 = _mm_loadu_ps(input + 4);
const __m128 vt2 = _mm_loadu_ps(input + 8);
const __m128 vt3 = _mm_loadu_ps(input + 12);
input += 16;
vacc0 = _mm_add_ps(vacc0, vt0);
vacc1 = _mm_add_ps(vacc1, vt1);
vacc2 = _mm_add_ps(vacc2, vt2);
vacc3 = _mm_add_ps(vacc3, vt3);
}
vacc0 = _mm_add_ps(vacc0, vacc1);
vacc2 = _mm_add_ps(vacc2, vacc3);
vacc0 = _mm_add_ps(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vt = _mm_loadu_ps(input);
input += 4;
vacc0 = _mm_add_ps(vacc0, vt);
}
vacc0 = _mm_add_ps(vacc0, _mm_movehl_ps(vacc0, vacc0));
if XNN_UNLIKELY(batch != 0) {
do {
const __m128 vt = _mm_load_ss(input);
input += 1;
vacc0 = _mm_add_ss(vacc0, vt);
batch -= sizeof(float);
} while (batch != 0);
}
vacc0 = _mm_add_ss(vacc0, _mm_shuffle_ps(vacc0, vacc0, _MM_SHUFFLE(1, 1, 1, 1)));
vacc0 = _mm_mul_ss(vacc0, _mm_load_ss(¶ms->scalar.scale));
_mm_store_ss(output, vacc0);
}
| 1,942 | 28 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m128 vacc0 = _mm_setzero_ps();
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vt = _mm_loadu_ps(input);
input += 4;
vacc0 = _mm_add_ps(vacc0, vt);
}
vacc0 = _mm_add_ps(vacc0, _mm_movehl_ps(vacc0, vacc0));
if XNN_UNLIKELY(batch != 0) {
do {
const __m128 vt = _mm_load_ss(input);
input += 1;
vacc0 = _mm_add_ss(vacc0, vt);
batch -= sizeof(float);
} while (batch != 0);
}
vacc0 = _mm_add_ss(vacc0, _mm_shuffle_ps(vacc0, vacc0, _MM_SHUFFLE(1, 1, 1, 1)));
vacc0 = _mm_mul_ss(vacc0, _mm_load_ss(¶ms->scalar.scale));
_mm_store_ss(output, vacc0);
}
| 1,299 | 25.530612 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-sse-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__sse_x8_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vt0 = _mm_loadu_ps(input);
const __m128 vt1 = _mm_loadu_ps(input + 4);
input += 8;
vacc0 = _mm_add_ps(vacc0, vt0);
vacc1 = _mm_add_ps(vacc1, vt1);
}
vacc0 = _mm_add_ps(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vt = _mm_loadu_ps(input);
input += 4;
vacc0 = _mm_add_ps(vacc0, vt);
}
vacc0 = _mm_add_ps(vacc0, _mm_movehl_ps(vacc0, vacc0));
if XNN_UNLIKELY(batch != 0) {
do {
const __m128 vt = _mm_load_ss(input);
input += 1;
vacc0 = _mm_add_ss(vacc0, vt);
batch -= sizeof(float);
} while (batch != 0);
}
vacc0 = _mm_add_ss(vacc0, _mm_shuffle_ps(vacc0, vacc0, _MM_SHUFFLE(1, 1, 1, 1)));
vacc0 = _mm_mul_ss(vacc0, _mm_load_ss(¶ms->scalar.scale));
_mm_store_ss(output, vacc0);
}
| 1,627 | 26.59322 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-wasmsimd-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__wasmsimd_x12_acc3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
v128_t vacc1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc2 = wasm_f32x4_const_splat(0.0f);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vt0 = wasm_v128_load(input);
const v128_t vt1 = wasm_v128_load(input + 4);
const v128_t vt2 = wasm_v128_load(input + 8);
input += 12;
vacc0 = wasm_f32x4_add(vacc0, vt0);
vacc1 = wasm_f32x4_add(vacc1, vt1);
vacc2 = wasm_f32x4_add(vacc2, vt2);
}
vacc0 = wasm_f32x4_add(vacc0, vacc1);
vacc0 = wasm_f32x4_add(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vt = wasm_v128_load(input);
input += 4;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v64x2_shuffle(vacc0, vacc0, 1, 1));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const v128_t vt = wasm_v128_load64_zero(input);
input += 2;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v32x4_shuffle(vacc0, vacc0, 1, 1, 1, 1));
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const v128_t vt = wasm_v128_load32_zero(input);
vacc0 = wasm_f32x4_add(vacc0, vt);
}
const v128_t vscale = wasm_v128_load32_zero(¶ms->scalar.scale);
vacc0 = wasm_f32x4_mul(vacc0, vscale);
wasm_v128_store32_lane(output, vacc0, 0);
}
| 2,040 | 30.4 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-wasmsimd-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__wasmsimd_x16_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
v128_t vacc1 = wasm_f32x4_const_splat(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vt0 = wasm_v128_load(input);
const v128_t vt1 = wasm_v128_load(input + 4);
const v128_t vt2 = wasm_v128_load(input + 8);
const v128_t vt3 = wasm_v128_load(input + 12);
input += 16;
vacc0 = wasm_f32x4_add(vacc0, vt0);
vacc1 = wasm_f32x4_add(vacc1, vt1);
vacc0 = wasm_f32x4_add(vacc0, vt2);
vacc1 = wasm_f32x4_add(vacc1, vt3);
}
vacc0 = wasm_f32x4_add(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vt = wasm_v128_load(input);
input += 4;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v64x2_shuffle(vacc0, vacc0, 1, 1));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const v128_t vt = wasm_v128_load64_zero(input);
input += 2;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v32x4_shuffle(vacc0, vacc0, 1, 1, 1, 1));
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const v128_t vt = wasm_v128_load32_zero(input);
vacc0 = wasm_f32x4_add(vacc0, vt);
}
const v128_t vscale = wasm_v128_load32_zero(¶ms->scalar.scale);
vacc0 = wasm_f32x4_mul(vacc0, vscale);
wasm_v128_store32_lane(output, vacc0, 0);
}
| 2,044 | 30.461538 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-wasmsimd-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__wasmsimd_x16_acc4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
v128_t vacc1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc3 = wasm_f32x4_const_splat(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vt0 = wasm_v128_load(input);
const v128_t vt1 = wasm_v128_load(input + 4);
const v128_t vt2 = wasm_v128_load(input + 8);
const v128_t vt3 = wasm_v128_load(input + 12);
input += 16;
vacc0 = wasm_f32x4_add(vacc0, vt0);
vacc1 = wasm_f32x4_add(vacc1, vt1);
vacc2 = wasm_f32x4_add(vacc2, vt2);
vacc3 = wasm_f32x4_add(vacc3, vt3);
}
vacc0 = wasm_f32x4_add(vacc0, vacc1);
vacc2 = wasm_f32x4_add(vacc2, vacc3);
vacc0 = wasm_f32x4_add(vacc0, vacc2);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vt = wasm_v128_load(input);
input += 4;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v64x2_shuffle(vacc0, vacc0, 1, 1));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const v128_t vt = wasm_v128_load64_zero(input);
input += 2;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v32x4_shuffle(vacc0, vacc0, 1, 1, 1, 1));
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const v128_t vt = wasm_v128_load32_zero(input);
vacc0 = wasm_f32x4_add(vacc0, vt);
}
const v128_t vscale = wasm_v128_load32_zero(¶ms->scalar.scale);
vacc0 = wasm_f32x4_mul(vacc0, vscale);
wasm_v128_store32_lane(output, vacc0, 0);
}
| 2,218 | 31.15942 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__wasmsimd_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vt = wasm_v128_load(input);
input += 4;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v64x2_shuffle(vacc0, vacc0, 1, 1));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const v128_t vt = wasm_v128_load64_zero(input);
input += 2;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v32x4_shuffle(vacc0, vacc0, 1, 1, 1, 1));
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const v128_t vt = wasm_v128_load32_zero(input);
vacc0 = wasm_f32x4_add(vacc0, vt);
}
const v128_t vscale = wasm_v128_load32_zero(¶ms->scalar.scale);
vacc0 = wasm_f32x4_mul(vacc0, vscale);
wasm_v128_store32_lane(output, vacc0, 0);
}
| 1,503 | 28.490196 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rsum/gen/f32-rsum-wasmsimd-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rsum/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f32_rsum_ukernel__wasmsimd_x8_acc2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
v128_t vacc1 = wasm_f32x4_const_splat(0.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vt0 = wasm_v128_load(input);
const v128_t vt1 = wasm_v128_load(input + 4);
input += 8;
vacc0 = wasm_f32x4_add(vacc0, vt0);
vacc1 = wasm_f32x4_add(vacc1, vt1);
}
vacc0 = wasm_f32x4_add(vacc0, vacc1);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vt = wasm_v128_load(input);
input += 4;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v64x2_shuffle(vacc0, vacc0, 1, 1));
if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
const v128_t vt = wasm_v128_load64_zero(input);
input += 2;
vacc0 = wasm_f32x4_add(vacc0, vt);
}
vacc0 = wasm_f32x4_add(vacc0, wasm_v32x4_shuffle(vacc0, vacc0, 1, 1, 1, 1));
if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
const v128_t vt = wasm_v128_load32_zero(input);
vacc0 = wasm_f32x4_add(vacc0, vt);
}
const v128_t vscale = wasm_v128_load32_zero(¶ms->scalar.scale);
vacc0 = wasm_f32x4_mul(vacc0, vscale);
wasm_v128_store32_lane(output, vacc0, 0);
}
| 1,859 | 29.491803 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-12x1-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_12x1__neon(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 12 * sizeof(float);
while XNN_LIKELY(mc >= 12 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 12;
mc -= 12 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,300 | 36.060914 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-12x1-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_12x1__neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 12 * sizeof(float);
while XNN_LIKELY(mc >= 12 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 12;
mc -= 12 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,303 | 36.076142 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-12x2-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_12x2__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 12 * sizeof(float);
while XNN_LIKELY(mc >= 12 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vw = vld1_f32(w); w += 2;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_lane_f32(vacc89ABn0, vi89AB, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_lane_f32(vacc89ABn1, vi89AB, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 12;
mc -= 12 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 15,321 | 37.401003 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-12x4-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_12x4__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 12 * sizeof(float);
while XNN_LIKELY(mc >= 12 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n2 = vacc0123n2;
float32x4_t vacc89ABn2 = vacc0123n2;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n3 = vacc0123n3;
float32x4_t vacc89ABn3 = vacc0123n3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_f32(w); w += 4;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_laneq_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_laneq_f32(vacc89ABn0, vi89AB, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_laneq_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_laneq_f32(vacc89ABn1, vi89AB, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc4567n2 = vfmaq_laneq_f32(vacc4567n2, vi4567, vw, 2);
vacc89ABn2 = vfmaq_laneq_f32(vacc89ABn2, vi89AB, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
vacc4567n3 = vfmaq_laneq_f32(vacc4567n3, vi4567, vw, 3);
vacc89ABn3 = vfmaq_laneq_f32(vacc89ABn3, vi89AB, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout4567n2 = vminq_f32(vacc4567n2, vmax);
float32x4_t vout89ABn2 = vminq_f32(vacc89ABn2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
float32x4_t vout4567n3 = vminq_f32(vacc4567n3, vmax);
float32x4_t vout89ABn3 = vminq_f32(vacc89ABn3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout4567n2 = vmaxq_f32(vout4567n2, vmin);
vout89ABn2 = vmaxq_f32(vout89ABn2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vout4567n3 = vmaxq_f32(vout4567n3, vmin);
vout89ABn3 = vmaxq_f32(vout89ABn3, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
vst1q_f32(output + 4, vout4567n2);
vst1q_f32(output + 8, vout89ABn2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
vst1q_f32(output + 4, vout4567n3);
vst1q_f32(output + 8, vout89ABn3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 12;
mc -= 12 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n2 = vacc0123n2;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n3 = vacc0123n3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_laneq_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_laneq_f32(vacc4567n1, vi4567, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc4567n2 = vfmaq_laneq_f32(vacc4567n2, vi4567, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
vacc4567n3 = vfmaq_laneq_f32(vacc4567n3, vi4567, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout4567n2 = vminq_f32(vacc4567n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
float32x4_t vout4567n3 = vminq_f32(vacc4567n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout4567n2 = vmaxq_f32(vout4567n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vout4567n3 = vmaxq_f32(vout4567n3, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
vst1q_f32(output + 4, vout4567n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
vst1q_f32(output + 4, vout4567n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc01n0 = vfma_laneq_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_laneq_f32(vacc01n1, vi01, vw, 1);
vacc01n2 = vfma_laneq_f32(vacc01n2, vi01, vw, 2);
vacc01n3 = vfma_laneq_f32(vacc01n3, vi01, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
float32x2_t vout01n2 = vmin_f32(vacc01n2, vget_low_f32(vmax));
float32x2_t vout01n3 = vmin_f32(vacc01n3, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vout01n2 = vmax_f32(vout01n2, vget_low_f32(vmin));
vout01n3 = vmax_f32(vout01n3, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0n0 = vfma_laneq_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_laneq_f32(vacc0n1, vi0, vw, 1);
vacc0n2 = vfma_laneq_f32(vacc0n2, vi0, vw, 2);
vacc0n3 = vfma_laneq_f32(vacc0n3, vi0, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
float32x2_t vout0n2 = vmin_f32(vacc0n2, vget_low_f32(vmax));
float32x2_t vout0n3 = vmin_f32(vacc0n3, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vout0n2 = vmax_f32(vout0n2, vget_low_f32(vmin));
vout0n3 = vmax_f32(vout0n3, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n2, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n3, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 20,380 | 40.678937 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neon-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
float32x4_t vi89AB = vld1q_f32(input + 8);
float32x4_t viCDEF = vld1q_f32(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
float32x4_t vacc89AB = vw;
float32x4_t vaccCDEF = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
vi89AB = vld1q_f32(input + 8);
viCDEF = vld1q_f32(input + 12);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vmla_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vmla_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 8,102 | 36.169725 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neon-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neon_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
float32x4_t vacc89ABx0 = vacc0123x0;
float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
float32x4_t vaccCDEFx0 = vacc0123x0;
float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
vacc89ABx0 = vmlaq_f32(vacc89ABx0, vi89ABx0, vw0);
vaccCDEFx0 = vmlaq_f32(vaccCDEFx0, viCDEFx0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
vacc89ABx1 = vmlaq_f32(vacc89ABx1, vi89ABx1, vw1);
vaccCDEFx1 = vmlaq_f32(vaccCDEFx1, viCDEFx1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
float32x4_t vacc89AB = vacc89ABx0;
float32x4_t vaccCDEF = vaccCDEFx0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,666 | 38.618852 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neon(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,593 | 36.408867 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neonfma-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neonfma_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
float32x4_t vi89AB = vld1q_f32(input + 8);
float32x4_t viCDEF = vld1q_f32(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
float32x4_t vacc89AB = vw;
float32x4_t vaccCDEF = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
vi89AB = vld1q_f32(input + 8);
viCDEF = vld1q_f32(input + 12);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vfma_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vfma_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 8,105 | 36.183486 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neonfma-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neonfma_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
float32x4_t vacc89ABx0 = vacc0123x0;
float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
float32x4_t vaccCDEFx0 = vacc0123x0;
float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vfmaq_f32(vacc4567x0, vi4567x0, vw0);
vacc89ABx0 = vfmaq_f32(vacc89ABx0, vi89ABx0, vw0);
vaccCDEFx0 = vfmaq_f32(vaccCDEFx0, viCDEFx0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vfmaq_f32(vacc4567x1, vi4567x1, vw1);
vacc89ABx1 = vfmaq_f32(vacc89ABx1, vi89ABx1, vw1);
vaccCDEFx1 = vfmaq_f32(vaccCDEFx1, viCDEFx1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
float32x4_t vacc89AB = vacc89ABx0;
float32x4_t vaccCDEF = vaccCDEFx0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,669 | 38.631148 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,596 | 36.423645 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__sse(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
__m128 vacc89AB = vacc0123;
__m128 vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
const __m128 viCDEF = _mm_loadu_ps(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
vacc89AB = _mm_add_ps(vacc89AB, _mm_mul_ps(vi89AB, vw));
vaccCDEF = _mm_add_ps(vaccCDEF, _mm_mul_ps(viCDEF, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
__m128 vout89AB = _mm_min_ps(vacc89AB, vmax);
__m128 voutCDEF = _mm_min_ps(vaccCDEF, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
vout89AB = _mm_max_ps(vout89AB, vmin);
voutCDEF = _mm_max_ps(voutCDEF, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
_mm_storeu_ps(output + 8, vout89AB);
_mm_storeu_ps(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
_mm_storeu_ps(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc01 = _mm_load_ss(w); w += 1;
vacc01 = _mm_unpacklo_ps(vacc01, vacc01);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi01 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
__m128 vw = _mm_load_ss(w); w += 1;
vw = _mm_unpacklo_ps(vw, vw);
vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(vi01, vw));
} while (--nnz != 0);
}
__m128 vout01 = _mm_min_ps(vacc01, vmax);
vout01 = _mm_max_ps(vout01, vmin);
_mm_storel_pi((__m64*) output, vout01);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0 = _mm_load_ss(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0 = _mm_load_ss(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load_ss(w); w += 1;
vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(vi0, vw));
} while (--nnz != 0);
}
__m128 vout0 = _mm_min_ss(vacc0, vmax);
vout0 = _mm_max_ss(vout0, vmin);
_mm_store_ss(output, vout0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,386 | 37.07732 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-arm-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,150 | 38.786957 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-arm-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,848 | 37.101942 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-arm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,790 | 40.487288 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
const v128_t vi89ABx2 = wasm_v128_load(input + 8);
const v128_t viCDEFx2 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
const v128_t vi89ABx3 = wasm_v128_load(input + 8);
const v128_t viCDEFx3 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 12,164 | 43.076087 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_arm(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,631 | 37.938776 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-x86-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,168 | 38.865217 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-x86-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,866 | 37.18932 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-x86-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,808 | 40.563559 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
const v128_t vi89ABx2 = wasm_v128_load(input + 8);
const v128_t viCDEFx2 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
const v128_t vi89ABx3 = wasm_v128_load(input + 8);
const v128_t viCDEFx3 = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 12,182 | 43.141304 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x1-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,649 | 38.030612 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x2-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x2__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vaccCDEFn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
float32x4_t vaccCDEFn1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vw = vld1_f32(w); w += 2;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_lane_f32(vacc89ABn0, vi89AB, vw, 0);
vaccCDEFn0 = vfmaq_lane_f32(vaccCDEFn0, viCDEF, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_lane_f32(vacc89ABn1, vi89AB, vw, 1);
vaccCDEFn1 = vfmaq_lane_f32(vaccCDEFn1, viCDEF, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t voutCDEFn0 = vminq_f32(vaccCDEFn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
float32x4_t voutCDEFn1 = vminq_f32(vaccCDEFn1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
voutCDEFn0 = vmaxq_f32(voutCDEFn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
voutCDEFn1 = vmaxq_f32(voutCDEFn1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
vst1q_f32(output + 12, voutCDEFn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
vst1q_f32(output + 12, voutCDEFn1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 16,204 | 37.954327 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-16x4-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_16x4__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(float);
while XNN_LIKELY(mc >= 16 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vaccCDEFn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
float32x4_t vaccCDEFn1 = vacc0123n1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n2 = vacc0123n2;
float32x4_t vacc89ABn2 = vacc0123n2;
float32x4_t vaccCDEFn2 = vacc0123n2;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n3 = vacc0123n3;
float32x4_t vacc89ABn3 = vacc0123n3;
float32x4_t vaccCDEFn3 = vacc0123n3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_f32(w); w += 4;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_laneq_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_laneq_f32(vacc89ABn0, vi89AB, vw, 0);
vaccCDEFn0 = vfmaq_laneq_f32(vaccCDEFn0, viCDEF, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_laneq_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_laneq_f32(vacc89ABn1, vi89AB, vw, 1);
vaccCDEFn1 = vfmaq_laneq_f32(vaccCDEFn1, viCDEF, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc4567n2 = vfmaq_laneq_f32(vacc4567n2, vi4567, vw, 2);
vacc89ABn2 = vfmaq_laneq_f32(vacc89ABn2, vi89AB, vw, 2);
vaccCDEFn2 = vfmaq_laneq_f32(vaccCDEFn2, viCDEF, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
vacc4567n3 = vfmaq_laneq_f32(vacc4567n3, vi4567, vw, 3);
vacc89ABn3 = vfmaq_laneq_f32(vacc89ABn3, vi89AB, vw, 3);
vaccCDEFn3 = vfmaq_laneq_f32(vaccCDEFn3, viCDEF, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t voutCDEFn0 = vminq_f32(vaccCDEFn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
float32x4_t voutCDEFn1 = vminq_f32(vaccCDEFn1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout4567n2 = vminq_f32(vacc4567n2, vmax);
float32x4_t vout89ABn2 = vminq_f32(vacc89ABn2, vmax);
float32x4_t voutCDEFn2 = vminq_f32(vaccCDEFn2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
float32x4_t vout4567n3 = vminq_f32(vacc4567n3, vmax);
float32x4_t vout89ABn3 = vminq_f32(vacc89ABn3, vmax);
float32x4_t voutCDEFn3 = vminq_f32(vaccCDEFn3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
voutCDEFn0 = vmaxq_f32(voutCDEFn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
voutCDEFn1 = vmaxq_f32(voutCDEFn1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout4567n2 = vmaxq_f32(vout4567n2, vmin);
vout89ABn2 = vmaxq_f32(vout89ABn2, vmin);
voutCDEFn2 = vmaxq_f32(voutCDEFn2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vout4567n3 = vmaxq_f32(vout4567n3, vmin);
vout89ABn3 = vmaxq_f32(vout89ABn3, vmin);
voutCDEFn3 = vmaxq_f32(voutCDEFn3, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
vst1q_f32(output + 12, voutCDEFn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
vst1q_f32(output + 12, voutCDEFn1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
vst1q_f32(output + 4, vout4567n2);
vst1q_f32(output + 8, vout89ABn2);
vst1q_f32(output + 12, voutCDEFn2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
vst1q_f32(output + 4, vout4567n3);
vst1q_f32(output + 8, vout89ABn3);
vst1q_f32(output + 12, voutCDEFn3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
mc -= 16 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n2 = vacc0123n2;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n3 = vacc0123n3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_laneq_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_laneq_f32(vacc4567n1, vi4567, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc4567n2 = vfmaq_laneq_f32(vacc4567n2, vi4567, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
vacc4567n3 = vfmaq_laneq_f32(vacc4567n3, vi4567, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout4567n2 = vminq_f32(vacc4567n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
float32x4_t vout4567n3 = vminq_f32(vacc4567n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout4567n2 = vmaxq_f32(vout4567n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vout4567n3 = vmaxq_f32(vout4567n3, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
vst1q_f32(output + 4, vout4567n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
vst1q_f32(output + 4, vout4567n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc01n0 = vfma_laneq_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_laneq_f32(vacc01n1, vi01, vw, 1);
vacc01n2 = vfma_laneq_f32(vacc01n2, vi01, vw, 2);
vacc01n3 = vfma_laneq_f32(vacc01n3, vi01, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
float32x2_t vout01n2 = vmin_f32(vacc01n2, vget_low_f32(vmax));
float32x2_t vout01n3 = vmin_f32(vacc01n3, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vout01n2 = vmax_f32(vout01n2, vget_low_f32(vmin));
vout01n3 = vmax_f32(vout01n3, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0n0 = vfma_laneq_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_laneq_f32(vacc0n1, vi0, vw, 1);
vacc0n2 = vfma_laneq_f32(vacc0n2, vi0, vw, 2);
vacc0n3 = vfma_laneq_f32(vacc0n3, vi0, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
float32x2_t vout0n2 = vmin_f32(vacc0n2, vget_low_f32(vmax));
float32x2_t vout0n3 = vmin_f32(vacc0n3, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vout0n2 = vmax_f32(vout0n2, vget_low_f32(vmin));
vout0n3 = vmax_f32(vout0n3, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n2, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n3, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 21,785 | 41.22093 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-1x1-minmax-scalar-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_1x1__scalar_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 1 * sizeof(float);
while XNN_LIKELY(mc >= 1 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
mc -= 1 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
}
}
| 1,865 | 26.441176 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-1x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_1x1__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 1 * sizeof(float);
while (mc >= 1 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
mc -= 1 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
}
}
| 2,496 | 28.376471 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-2x1-minmax-scalar-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_2x1__scalar_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 2 * sizeof(float);
while XNN_LIKELY(mc >= 2 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
mc -= 2 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 3,069 | 27.962264 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-2x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_2x1__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 2 * sizeof(float);
while (mc >= 2 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
mc -= 2 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,620 | 30.868966 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neon-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neon_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
float32x4_t vi89AB = vld1q_f32(input + 8);
float32x4_t viCDEF = vld1q_f32(input + 12);
float32x4_t viGHIJ = vld1q_f32(input + 16);
float32x4_t viKLMN = vld1q_f32(input + 20);
float32x4_t viOPQR = vld1q_f32(input + 24);
float32x4_t viSTUV = vld1q_f32(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
float32x4_t vacc89AB = vw;
float32x4_t vaccCDEF = vw;
float32x4_t vaccGHIJ = vw;
float32x4_t vaccKLMN = vw;
float32x4_t vaccOPQR = vw;
float32x4_t vaccSTUV = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
vi89AB = vld1q_f32(input + 8);
viCDEF = vld1q_f32(input + 12);
viGHIJ = vld1q_f32(input + 16);
viKLMN = vld1q_f32(input + 20);
viOPQR = vld1q_f32(input + 24);
viSTUV = vld1q_f32(input + 28);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vb);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vmla_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vmla_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,650 | 38.097315 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neon-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neon_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
float32x4_t vacc89ABx0 = vacc0123x0;
float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
float32x4_t vaccCDEFx0 = vacc0123x0;
float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
float32x4_t vaccGHIJx0 = vacc0123x0;
float32x4_t vaccGHIJx1 = vmovq_n_f32(0.0f);
float32x4_t vaccKLMNx0 = vacc0123x0;
float32x4_t vaccKLMNx1 = vmovq_n_f32(0.0f);
float32x4_t vaccOPQRx0 = vacc0123x0;
float32x4_t vaccOPQRx1 = vmovq_n_f32(0.0f);
float32x4_t vaccSTUVx0 = vacc0123x0;
float32x4_t vaccSTUVx1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
const float32x4_t viGHIJx0 = vld1q_f32(input + 16);
const float32x4_t viKLMNx0 = vld1q_f32(input + 20);
const float32x4_t viOPQRx0 = vld1q_f32(input + 24);
const float32x4_t viSTUVx0 = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
vacc89ABx0 = vmlaq_f32(vacc89ABx0, vi89ABx0, vw0);
vaccCDEFx0 = vmlaq_f32(vaccCDEFx0, viCDEFx0, vw0);
vaccGHIJx0 = vmlaq_f32(vaccGHIJx0, viGHIJx0, vw0);
vaccKLMNx0 = vmlaq_f32(vaccKLMNx0, viKLMNx0, vw0);
vaccOPQRx0 = vmlaq_f32(vaccOPQRx0, viOPQRx0, vw0);
vaccSTUVx0 = vmlaq_f32(vaccSTUVx0, viSTUVx0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
const float32x4_t viGHIJx1 = vld1q_f32(input + 16);
const float32x4_t viKLMNx1 = vld1q_f32(input + 20);
const float32x4_t viOPQRx1 = vld1q_f32(input + 24);
const float32x4_t viSTUVx1 = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
vacc89ABx1 = vmlaq_f32(vacc89ABx1, vi89ABx1, vw1);
vaccCDEFx1 = vmlaq_f32(vaccCDEFx1, viCDEFx1, vw1);
vaccGHIJx1 = vmlaq_f32(vaccGHIJx1, viGHIJx1, vw1);
vaccKLMNx1 = vmlaq_f32(vaccKLMNx1, viKLMNx1, vw1);
vaccOPQRx1 = vmlaq_f32(vaccOPQRx1, viOPQRx1, vw1);
vaccSTUVx1 = vmlaq_f32(vaccSTUVx1, viSTUVx1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
float32x4_t vacc89AB = vacc89ABx0;
float32x4_t vaccCDEF = vaccCDEFx0;
float32x4_t vaccGHIJ = vaccGHIJx0;
float32x4_t vaccKLMN = vaccKLMNx0;
float32x4_t vaccOPQR = vaccOPQRx0;
float32x4_t vaccSTUV = vaccSTUVx0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
vaccGHIJ = vaddq_f32(vaccGHIJ, vaccGHIJx1);
vaccKLMN = vaddq_f32(vaccKLMN, vaccKLMNx1);
vaccOPQR = vaddq_f32(vaccOPQR, vaccOPQRx1);
vaccSTUV = vaddq_f32(vaccSTUV, vaccSTUVx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 14,426 | 41.061224 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neon(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
float32x4_t vaccGHIJ = vacc0123;
float32x4_t vaccKLMN = vacc0123;
float32x4_t vaccOPQR = vacc0123;
float32x4_t vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vmlaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vmlaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vmlaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vmlaq_f32(vaccSTUV, viSTUV, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 10,741 | 38.492647 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neonfma-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
float32x4_t vi89AB = vld1q_f32(input + 8);
float32x4_t viCDEF = vld1q_f32(input + 12);
float32x4_t viGHIJ = vld1q_f32(input + 16);
float32x4_t viKLMN = vld1q_f32(input + 20);
float32x4_t viOPQR = vld1q_f32(input + 24);
float32x4_t viSTUV = vld1q_f32(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
float32x4_t vacc89AB = vw;
float32x4_t vaccCDEF = vw;
float32x4_t vaccGHIJ = vw;
float32x4_t vaccKLMN = vw;
float32x4_t vaccOPQR = vw;
float32x4_t vaccSTUV = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vfmaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vfmaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vfmaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vfmaq_f32(vaccSTUV, viSTUV, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
vi89AB = vld1q_f32(input + 8);
viCDEF = vld1q_f32(input + 12);
viGHIJ = vld1q_f32(input + 16);
viKLMN = vld1q_f32(input + 20);
viOPQR = vld1q_f32(input + 24);
viSTUV = vld1q_f32(input + 28);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vb);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vb);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vfma_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vfma_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,653 | 38.107383 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neonfma-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neonfma_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
float32x4_t vacc89ABx0 = vacc0123x0;
float32x4_t vacc89ABx1 = vmovq_n_f32(0.0f);
float32x4_t vaccCDEFx0 = vacc0123x0;
float32x4_t vaccCDEFx1 = vmovq_n_f32(0.0f);
float32x4_t vaccGHIJx0 = vacc0123x0;
float32x4_t vaccGHIJx1 = vmovq_n_f32(0.0f);
float32x4_t vaccKLMNx0 = vacc0123x0;
float32x4_t vaccKLMNx1 = vmovq_n_f32(0.0f);
float32x4_t vaccOPQRx0 = vacc0123x0;
float32x4_t vaccOPQRx1 = vmovq_n_f32(0.0f);
float32x4_t vaccSTUVx0 = vacc0123x0;
float32x4_t vaccSTUVx1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
const float32x4_t vi89ABx0 = vld1q_f32(input + 8);
const float32x4_t viCDEFx0 = vld1q_f32(input + 12);
const float32x4_t viGHIJx0 = vld1q_f32(input + 16);
const float32x4_t viKLMNx0 = vld1q_f32(input + 20);
const float32x4_t viOPQRx0 = vld1q_f32(input + 24);
const float32x4_t viSTUVx0 = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vfmaq_f32(vacc4567x0, vi4567x0, vw0);
vacc89ABx0 = vfmaq_f32(vacc89ABx0, vi89ABx0, vw0);
vaccCDEFx0 = vfmaq_f32(vaccCDEFx0, viCDEFx0, vw0);
vaccGHIJx0 = vfmaq_f32(vaccGHIJx0, viGHIJx0, vw0);
vaccKLMNx0 = vfmaq_f32(vaccKLMNx0, viKLMNx0, vw0);
vaccOPQRx0 = vfmaq_f32(vaccOPQRx0, viOPQRx0, vw0);
vaccSTUVx0 = vfmaq_f32(vaccSTUVx0, viSTUVx0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
const float32x4_t vi89ABx1 = vld1q_f32(input + 8);
const float32x4_t viCDEFx1 = vld1q_f32(input + 12);
const float32x4_t viGHIJx1 = vld1q_f32(input + 16);
const float32x4_t viKLMNx1 = vld1q_f32(input + 20);
const float32x4_t viOPQRx1 = vld1q_f32(input + 24);
const float32x4_t viSTUVx1 = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vfmaq_f32(vacc4567x1, vi4567x1, vw1);
vacc89ABx1 = vfmaq_f32(vacc89ABx1, vi89ABx1, vw1);
vaccCDEFx1 = vfmaq_f32(vaccCDEFx1, viCDEFx1, vw1);
vaccGHIJx1 = vfmaq_f32(vaccGHIJx1, viGHIJx1, vw1);
vaccKLMNx1 = vfmaq_f32(vaccKLMNx1, viKLMNx1, vw1);
vaccOPQRx1 = vfmaq_f32(vaccOPQRx1, viOPQRx1, vw1);
vaccSTUVx1 = vfmaq_f32(vaccSTUVx1, viSTUVx1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
float32x4_t vacc89AB = vacc89ABx0;
float32x4_t vaccCDEF = vaccCDEFx0;
float32x4_t vaccGHIJ = vaccGHIJx0;
float32x4_t vaccKLMN = vaccKLMNx0;
float32x4_t vaccOPQR = vaccOPQRx0;
float32x4_t vaccSTUV = vaccSTUVx0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
vacc89AB = vaddq_f32(vacc89AB, vacc89ABx1);
vaccCDEF = vaddq_f32(vaccCDEF, vaccCDEFx1);
vaccGHIJ = vaddq_f32(vaccGHIJ, vaccGHIJx1);
vaccKLMN = vaddq_f32(vaccKLMN, vaccKLMNx1);
vaccOPQR = vaddq_f32(vaccOPQR, vaccOPQRx1);
vaccSTUV = vaddq_f32(vaccSTUV, vaccSTUVx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vfmaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vfmaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vfmaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vfmaq_f32(vaccSTUV, viSTUV, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 14,429 | 41.069971 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
float32x4_t vaccGHIJ = vacc0123;
float32x4_t vaccKLMN = vacc0123;
float32x4_t vaccOPQR = vacc0123;
float32x4_t vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vfmaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vfmaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vfmaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vfmaq_f32(vaccSTUV, viSTUV, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 10,744 | 38.503676 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__sse(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
__m128 vacc89AB = vacc0123;
__m128 vaccCDEF = vacc0123;
__m128 vaccGHIJ = vacc0123;
__m128 vaccKLMN = vacc0123;
__m128 vaccOPQR = vacc0123;
__m128 vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
const __m128 viCDEF = _mm_loadu_ps(input + 12);
const __m128 viGHIJ = _mm_loadu_ps(input + 16);
const __m128 viKLMN = _mm_loadu_ps(input + 20);
const __m128 viOPQR = _mm_loadu_ps(input + 24);
const __m128 viSTUV = _mm_loadu_ps(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
vacc89AB = _mm_add_ps(vacc89AB, _mm_mul_ps(vi89AB, vw));
vaccCDEF = _mm_add_ps(vaccCDEF, _mm_mul_ps(viCDEF, vw));
vaccGHIJ = _mm_add_ps(vaccGHIJ, _mm_mul_ps(viGHIJ, vw));
vaccKLMN = _mm_add_ps(vaccKLMN, _mm_mul_ps(viKLMN, vw));
vaccOPQR = _mm_add_ps(vaccOPQR, _mm_mul_ps(viOPQR, vw));
vaccSTUV = _mm_add_ps(vaccSTUV, _mm_mul_ps(viSTUV, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
__m128 vout89AB = _mm_min_ps(vacc89AB, vmax);
__m128 voutCDEF = _mm_min_ps(vaccCDEF, vmax);
__m128 voutGHIJ = _mm_min_ps(vaccGHIJ, vmax);
__m128 voutKLMN = _mm_min_ps(vaccKLMN, vmax);
__m128 voutOPQR = _mm_min_ps(vaccOPQR, vmax);
__m128 voutSTUV = _mm_min_ps(vaccSTUV, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
vout89AB = _mm_max_ps(vout89AB, vmin);
voutCDEF = _mm_max_ps(voutCDEF, vmin);
voutGHIJ = _mm_max_ps(voutGHIJ, vmin);
voutKLMN = _mm_max_ps(voutKLMN, vmin);
voutOPQR = _mm_max_ps(voutOPQR, vmin);
voutSTUV = _mm_max_ps(voutSTUV, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
_mm_storeu_ps(output + 8, vout89AB);
_mm_storeu_ps(output + 12, voutCDEF);
_mm_storeu_ps(output + 16, voutGHIJ);
_mm_storeu_ps(output + 20, voutKLMN);
_mm_storeu_ps(output + 24, voutOPQR);
_mm_storeu_ps(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
__m128 vacc89AB = vacc0123;
__m128 vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
const __m128 viCDEF = _mm_loadu_ps(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
vacc89AB = _mm_add_ps(vacc89AB, _mm_mul_ps(vi89AB, vw));
vaccCDEF = _mm_add_ps(vaccCDEF, _mm_mul_ps(viCDEF, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
__m128 vout89AB = _mm_min_ps(vacc89AB, vmax);
__m128 voutCDEF = _mm_min_ps(vaccCDEF, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
vout89AB = _mm_max_ps(vout89AB, vmin);
voutCDEF = _mm_max_ps(voutCDEF, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
_mm_storeu_ps(output + 8, vout89AB);
_mm_storeu_ps(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
_mm_storeu_ps(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc01 = _mm_load_ss(w); w += 1;
vacc01 = _mm_unpacklo_ps(vacc01, vacc01);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi01 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
__m128 vw = _mm_load_ss(w); w += 1;
vw = _mm_unpacklo_ps(vw, vw);
vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(vi01, vw));
} while (--nnz != 0);
}
__m128 vout01 = _mm_min_ps(vacc01, vmax);
vout01 = _mm_max_ps(vout01, vmin);
_mm_storel_pi((__m64*) output, vout01);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0 = _mm_load_ss(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0 = _mm_load_ss(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load_ss(w); w += 1;
vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(vi0, vw));
} while (--nnz != 0);
}
__m128 vout0 = _mm_min_ss(vacc0, vmax);
vout0 = _mm_max_ss(vout0, vmin);
_mm_store_ss(output, vout0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 10,565 | 39.328244 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-arm-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
v128_t viGHIJ = wasm_v128_load(input + 16);
v128_t viKLMN = wasm_v128_load(input + 20);
v128_t viOPQR = wasm_v128_load(input + 24);
v128_t viSTUV = wasm_v128_load(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
v128_t vaccGHIJ = vw;
v128_t vaccKLMN = vw;
v128_t vaccOPQR = vw;
v128_t vaccSTUV = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_min(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_min(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_min(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_min(vmax, vaccSTUV);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_max(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_max(vmin, voutKLMN);
voutOPQR = wasm_f32x4_max(vmin, voutOPQR);
voutSTUV = wasm_f32x4_max(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 13,590 | 41.605016 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-arm-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
v128_t viGHIJ = wasm_v128_load(input + 16);
v128_t viKLMN = wasm_v128_load(input + 20);
v128_t viOPQR = wasm_v128_load(input + 24);
v128_t viSTUV = wasm_v128_load(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
v128_t vaccGHIJ = vw;
v128_t vaccKLMN = vw;
v128_t vaccOPQR = vw;
v128_t vaccSTUV = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_min(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_min(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_min(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_min(vmax, vaccSTUV);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_max(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_max(vmin, voutKLMN);
voutOPQR = wasm_f32x4_max(vmin, voutOPQR);
voutSTUV = wasm_f32x4_max(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,328 | 39.605735 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-arm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx0 = vacc0123x0;
v128_t vaccGHIJx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx0 = vacc0123x0;
v128_t vaccKLMNx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx0 = vacc0123x0;
v128_t vaccOPQRx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx0 = vacc0123x0;
v128_t vaccSTUVx1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
const v128_t viGHIJx0 = wasm_v128_load(input + 16);
const v128_t viKLMNx0 = wasm_v128_load(input + 20);
const v128_t viOPQRx0 = wasm_v128_load(input + 24);
const v128_t viSTUVx0 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
const v128_t viGHIJx1 = wasm_v128_load(input + 16);
const v128_t viKLMNx1 = wasm_v128_load(input + 20);
const v128_t viOPQRx1 = wasm_v128_load(input + 24);
const v128_t viSTUVx1 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
v128_t vaccGHIJ = vaccGHIJx0;
v128_t vaccKLMN = vaccKLMNx0;
v128_t vaccOPQR = vaccOPQRx0;
v128_t vaccSTUV = vaccSTUVx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_min(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_min(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_min(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_min(vmax, vaccSTUV);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_max(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_max(vmin, voutKLMN);
voutOPQR = wasm_f32x4_max(vmin, voutOPQR);
voutSTUV = wasm_f32x4_max(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 14,874 | 43.66967 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx0 = vacc0123x0;
v128_t vaccGHIJx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx0 = vacc0123x0;
v128_t vaccKLMNx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx0 = vacc0123x0;
v128_t vaccOPQRx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx0 = vacc0123x0;
v128_t vaccSTUVx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
const v128_t viGHIJx0 = wasm_v128_load(input + 16);
const v128_t viKLMNx0 = wasm_v128_load(input + 20);
const v128_t viOPQRx0 = wasm_v128_load(input + 24);
const v128_t viSTUVx0 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
const v128_t viGHIJx1 = wasm_v128_load(input + 16);
const v128_t viKLMNx1 = wasm_v128_load(input + 20);
const v128_t viOPQRx1 = wasm_v128_load(input + 24);
const v128_t viSTUVx1 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
const v128_t vi89ABx2 = wasm_v128_load(input + 8);
const v128_t viCDEFx2 = wasm_v128_load(input + 12);
const v128_t viGHIJx2 = wasm_v128_load(input + 16);
const v128_t viKLMNx2 = wasm_v128_load(input + 20);
const v128_t viOPQRx2 = wasm_v128_load(input + 24);
const v128_t viSTUVx2 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
vaccGHIJx2 = wasm_f32x4_add(vaccGHIJx2, wasm_f32x4_mul(viGHIJx2, vw2));
vaccKLMNx2 = wasm_f32x4_add(vaccKLMNx2, wasm_f32x4_mul(viKLMNx2, vw2));
vaccOPQRx2 = wasm_f32x4_add(vaccOPQRx2, wasm_f32x4_mul(viOPQRx2, vw2));
vaccSTUVx2 = wasm_f32x4_add(vaccSTUVx2, wasm_f32x4_mul(viSTUVx2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
const v128_t vi89ABx3 = wasm_v128_load(input + 8);
const v128_t viCDEFx3 = wasm_v128_load(input + 12);
const v128_t viGHIJx3 = wasm_v128_load(input + 16);
const v128_t viKLMNx3 = wasm_v128_load(input + 20);
const v128_t viOPQRx3 = wasm_v128_load(input + 24);
const v128_t viSTUVx3 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
vaccGHIJx3 = wasm_f32x4_add(vaccGHIJx3, wasm_f32x4_mul(viGHIJx3, vw3));
vaccKLMNx3 = wasm_f32x4_add(vaccKLMNx3, wasm_f32x4_mul(viKLMNx3, vw3));
vaccOPQRx3 = wasm_f32x4_add(vaccOPQRx3, wasm_f32x4_mul(viOPQRx3, vw3));
vaccSTUVx3 = wasm_f32x4_add(vaccSTUVx3, wasm_f32x4_mul(viSTUVx3, vw3));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
v128_t vaccGHIJ = vaccGHIJx0;
v128_t vaccKLMN = vaccKLMNx0;
v128_t vaccOPQR = vaccOPQRx0;
v128_t vaccSTUV = vaccSTUVx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx2);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx2);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx2);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx3);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx3);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx3);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_min(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_min(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_min(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_min(vmax, vaccSTUV);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_max(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_max(vmin, voutKLMN);
voutOPQR = wasm_f32x4_max(vmin, voutOPQR);
voutSTUV = wasm_f32x4_max(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 19,256 | 46.548148 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
v128_t vaccGHIJ = vacc0123;
v128_t vaccKLMN = vacc0123;
v128_t vaccOPQR = vacc0123;
v128_t vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_min(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_min(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_min(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_min(vmax, vaccSTUV);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_max(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_max(vmin, voutKLMN);
voutOPQR = wasm_f32x4_max(vmin, voutOPQR);
voutSTUV = wasm_f32x4_max(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_min(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_min(vmax, vaccCDEF);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
vout89AB = wasm_f32x4_max(vmin, vout89AB);
voutCDEF = wasm_f32x4_max(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 10,991 | 40.479245 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-x86-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
v128_t viGHIJ = wasm_v128_load(input + 16);
v128_t viKLMN = wasm_v128_load(input + 20);
v128_t viOPQR = wasm_v128_load(input + 24);
v128_t viSTUV = wasm_v128_load(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
v128_t vaccGHIJ = vw;
v128_t vaccKLMN = vw;
v128_t vaccOPQR = vw;
v128_t vaccSTUV = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 13,624 | 41.711599 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-x86-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
v128_t vi89AB = wasm_v128_load(input + 8);
v128_t viCDEF = wasm_v128_load(input + 12);
v128_t viGHIJ = wasm_v128_load(input + 16);
v128_t viKLMN = wasm_v128_load(input + 20);
v128_t viOPQR = wasm_v128_load(input + 24);
v128_t viSTUV = wasm_v128_load(input + 28);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
v128_t vacc89AB = vw;
v128_t vaccCDEF = vw;
v128_t vaccGHIJ = vw;
v128_t vaccKLMN = vw;
v128_t vaccOPQR = vw;
v128_t vaccSTUV = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vi89AB = wasm_v128_load(input + 8);
viCDEF = wasm_v128_load(input + 12);
viGHIJ = wasm_v128_load(input + 16);
viKLMN = wasm_v128_load(input + 20);
viOPQR = wasm_v128_load(input + 24);
viSTUV = wasm_v128_load(input + 28);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,362 | 39.727599 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-x86-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx0 = vacc0123x0;
v128_t vaccGHIJx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx0 = vacc0123x0;
v128_t vaccKLMNx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx0 = vacc0123x0;
v128_t vaccOPQRx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx0 = vacc0123x0;
v128_t vaccSTUVx1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
const v128_t viGHIJx0 = wasm_v128_load(input + 16);
const v128_t viKLMNx0 = wasm_v128_load(input + 20);
const v128_t viOPQRx0 = wasm_v128_load(input + 24);
const v128_t viSTUVx0 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
const v128_t viGHIJx1 = wasm_v128_load(input + 16);
const v128_t viKLMNx1 = wasm_v128_load(input + 20);
const v128_t viOPQRx1 = wasm_v128_load(input + 24);
const v128_t viSTUVx1 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
v128_t vaccGHIJ = vaccGHIJx0;
v128_t vaccKLMN = vaccKLMNx0;
v128_t vaccOPQR = vaccOPQRx0;
v128_t vaccSTUV = vaccSTUVx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 14,908 | 43.771772 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx0 = vacc0123x0;
v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc89ABx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx0 = vacc0123x0;
v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccCDEFx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx0 = vacc0123x0;
v128_t vaccGHIJx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccGHIJx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx0 = vacc0123x0;
v128_t vaccKLMNx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccKLMNx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx0 = vacc0123x0;
v128_t vaccOPQRx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccOPQRx3 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx0 = vacc0123x0;
v128_t vaccSTUVx1 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx2 = wasm_f32x4_const_splat(0.0f);
v128_t vaccSTUVx3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
const v128_t vi89ABx0 = wasm_v128_load(input + 8);
const v128_t viCDEFx0 = wasm_v128_load(input + 12);
const v128_t viGHIJx0 = wasm_v128_load(input + 16);
const v128_t viKLMNx0 = wasm_v128_load(input + 20);
const v128_t viOPQRx0 = wasm_v128_load(input + 24);
const v128_t viSTUVx0 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
const v128_t vi89ABx1 = wasm_v128_load(input + 8);
const v128_t viCDEFx1 = wasm_v128_load(input + 12);
const v128_t viGHIJx1 = wasm_v128_load(input + 16);
const v128_t viKLMNx1 = wasm_v128_load(input + 20);
const v128_t viOPQRx1 = wasm_v128_load(input + 24);
const v128_t viSTUVx1 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
const v128_t vi89ABx2 = wasm_v128_load(input + 8);
const v128_t viCDEFx2 = wasm_v128_load(input + 12);
const v128_t viGHIJx2 = wasm_v128_load(input + 16);
const v128_t viKLMNx2 = wasm_v128_load(input + 20);
const v128_t viOPQRx2 = wasm_v128_load(input + 24);
const v128_t viSTUVx2 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
vaccGHIJx2 = wasm_f32x4_add(vaccGHIJx2, wasm_f32x4_mul(viGHIJx2, vw2));
vaccKLMNx2 = wasm_f32x4_add(vaccKLMNx2, wasm_f32x4_mul(viKLMNx2, vw2));
vaccOPQRx2 = wasm_f32x4_add(vaccOPQRx2, wasm_f32x4_mul(viOPQRx2, vw2));
vaccSTUVx2 = wasm_f32x4_add(vaccSTUVx2, wasm_f32x4_mul(viSTUVx2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
const v128_t vi89ABx3 = wasm_v128_load(input + 8);
const v128_t viCDEFx3 = wasm_v128_load(input + 12);
const v128_t viGHIJx3 = wasm_v128_load(input + 16);
const v128_t viKLMNx3 = wasm_v128_load(input + 20);
const v128_t viOPQRx3 = wasm_v128_load(input + 24);
const v128_t viSTUVx3 = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
vaccGHIJx3 = wasm_f32x4_add(vaccGHIJx3, wasm_f32x4_mul(viGHIJx3, vw3));
vaccKLMNx3 = wasm_f32x4_add(vaccKLMNx3, wasm_f32x4_mul(viKLMNx3, vw3));
vaccOPQRx3 = wasm_f32x4_add(vaccOPQRx3, wasm_f32x4_mul(viOPQRx3, vw3));
vaccSTUVx3 = wasm_f32x4_add(vaccSTUVx3, wasm_f32x4_mul(viSTUVx3, vw3));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
v128_t vacc89AB = vacc89ABx0;
v128_t vaccCDEF = vaccCDEFx0;
v128_t vaccGHIJ = vaccGHIJx0;
v128_t vaccKLMN = vaccKLMNx0;
v128_t vaccOPQR = vaccOPQRx0;
v128_t vaccSTUV = vaccSTUVx0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx2);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx2);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx2);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx3);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx3);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx3);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 19,290 | 46.632099 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x1-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
v128_t vaccGHIJ = vacc0123;
v128_t vaccKLMN = vacc0123;
v128_t vaccOPQR = vacc0123;
v128_t vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
const v128_t viKLMN = wasm_v128_load(input + 20);
const v128_t viOPQR = wasm_v128_load(input + 24);
const v128_t viSTUV = wasm_v128_load(input + 28);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
wasm_v128_store(output + 16, voutGHIJ);
wasm_v128_store(output + 20, voutKLMN);
wasm_v128_store(output + 24, voutOPQR);
wasm_v128_store(output + 28, voutSTUV);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
v128_t vacc89AB = vacc0123;
v128_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
wasm_v128_store(output + 8, vout89AB);
wasm_v128_store(output + 12, voutCDEF);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,025 | 40.607547 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-32x2-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_32x2__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(float);
while XNN_LIKELY(mc >= 32 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vaccCDEFn0 = vacc0123n0;
float32x4_t vaccGHIJn0 = vacc0123n0;
float32x4_t vaccKLMNn0 = vacc0123n0;
float32x4_t vaccOPQRn0 = vacc0123n0;
float32x4_t vaccSTUVn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
float32x4_t vaccCDEFn1 = vacc0123n1;
float32x4_t vaccGHIJn1 = vacc0123n1;
float32x4_t vaccKLMNn1 = vacc0123n1;
float32x4_t vaccOPQRn1 = vacc0123n1;
float32x4_t vaccSTUVn1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x2_t vw = vld1_f32(w); w += 2;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_lane_f32(vacc89ABn0, vi89AB, vw, 0);
vaccCDEFn0 = vfmaq_lane_f32(vaccCDEFn0, viCDEF, vw, 0);
vaccGHIJn0 = vfmaq_lane_f32(vaccGHIJn0, viGHIJ, vw, 0);
vaccKLMNn0 = vfmaq_lane_f32(vaccKLMNn0, viKLMN, vw, 0);
vaccOPQRn0 = vfmaq_lane_f32(vaccOPQRn0, viOPQR, vw, 0);
vaccSTUVn0 = vfmaq_lane_f32(vaccSTUVn0, viSTUV, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_lane_f32(vacc89ABn1, vi89AB, vw, 1);
vaccCDEFn1 = vfmaq_lane_f32(vaccCDEFn1, viCDEF, vw, 1);
vaccGHIJn1 = vfmaq_lane_f32(vaccGHIJn1, viGHIJ, vw, 1);
vaccKLMNn1 = vfmaq_lane_f32(vaccKLMNn1, viKLMN, vw, 1);
vaccOPQRn1 = vfmaq_lane_f32(vaccOPQRn1, viOPQR, vw, 1);
vaccSTUVn1 = vfmaq_lane_f32(vaccSTUVn1, viSTUV, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t voutCDEFn0 = vminq_f32(vaccCDEFn0, vmax);
float32x4_t voutGHIJn0 = vminq_f32(vaccGHIJn0, vmax);
float32x4_t voutKLMNn0 = vminq_f32(vaccKLMNn0, vmax);
float32x4_t voutOPQRn0 = vminq_f32(vaccOPQRn0, vmax);
float32x4_t voutSTUVn0 = vminq_f32(vaccSTUVn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
float32x4_t voutCDEFn1 = vminq_f32(vaccCDEFn1, vmax);
float32x4_t voutGHIJn1 = vminq_f32(vaccGHIJn1, vmax);
float32x4_t voutKLMNn1 = vminq_f32(vaccKLMNn1, vmax);
float32x4_t voutOPQRn1 = vminq_f32(vaccOPQRn1, vmax);
float32x4_t voutSTUVn1 = vminq_f32(vaccSTUVn1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
voutCDEFn0 = vmaxq_f32(voutCDEFn0, vmin);
voutGHIJn0 = vmaxq_f32(voutGHIJn0, vmin);
voutKLMNn0 = vmaxq_f32(voutKLMNn0, vmin);
voutOPQRn0 = vmaxq_f32(voutOPQRn0, vmin);
voutSTUVn0 = vmaxq_f32(voutSTUVn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
voutCDEFn1 = vmaxq_f32(voutCDEFn1, vmin);
voutGHIJn1 = vmaxq_f32(voutGHIJn1, vmin);
voutKLMNn1 = vmaxq_f32(voutKLMNn1, vmin);
voutOPQRn1 = vmaxq_f32(voutOPQRn1, vmin);
voutSTUVn1 = vmaxq_f32(voutSTUVn1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
vst1q_f32(output + 12, voutCDEFn0);
vst1q_f32(output + 16, voutGHIJn0);
vst1q_f32(output + 20, voutKLMNn0);
vst1q_f32(output + 24, voutOPQRn0);
vst1q_f32(output + 28, voutSTUVn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
vst1q_f32(output + 12, voutCDEFn1);
vst1q_f32(output + 16, voutGHIJn1);
vst1q_f32(output + 20, voutKLMNn1);
vst1q_f32(output + 24, voutOPQRn1);
vst1q_f32(output + 28, voutSTUVn1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
float32x4_t vaccGHIJ = vacc0123;
float32x4_t vaccKLMN = vacc0123;
float32x4_t vaccOPQR = vacc0123;
float32x4_t vaccSTUV = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
const float32x4_t viGHIJ = vld1q_f32(input + 16);
const float32x4_t viKLMN = vld1q_f32(input + 20);
const float32x4_t viOPQR = vld1q_f32(input + 24);
const float32x4_t viSTUV = vld1q_f32(input + 28);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
xnn_prefetch_to_l1(input + 32);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
vaccGHIJ = vfmaq_f32(vaccGHIJ, viGHIJ, vw);
vaccKLMN = vfmaq_f32(vaccKLMN, viKLMN, vw);
vaccOPQR = vfmaq_f32(vaccOPQR, viOPQR, vw);
vaccSTUV = vfmaq_f32(vaccSTUV, viSTUV, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
float32x4_t voutGHIJ = vminq_f32(vaccGHIJ, vmax);
float32x4_t voutKLMN = vminq_f32(vaccKLMN, vmax);
float32x4_t voutOPQR = vminq_f32(vaccOPQR, vmax);
float32x4_t voutSTUV = vminq_f32(vaccSTUV, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
voutGHIJ = vmaxq_f32(voutGHIJ, vmin);
voutKLMN = vmaxq_f32(voutKLMN, vmin);
voutOPQR = vmaxq_f32(voutOPQR, vmin);
voutSTUV = vmaxq_f32(voutSTUV, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
vst1q_f32(output + 16, voutGHIJ);
vst1q_f32(output + 20, voutKLMN);
vst1q_f32(output + 24, voutOPQR);
vst1q_f32(output + 28, voutSTUV);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 32;
mc -= 32 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(float);
if (mc & (16 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc89ABn0 = vacc0123n0;
float32x4_t vaccCDEFn0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc89ABn1 = vacc0123n1;
float32x4_t vaccCDEFn1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc89ABn0 = vfmaq_lane_f32(vacc89ABn0, vi89AB, vw, 0);
vaccCDEFn0 = vfmaq_lane_f32(vaccCDEFn0, viCDEF, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
vacc89ABn1 = vfmaq_lane_f32(vacc89ABn1, vi89AB, vw, 1);
vaccCDEFn1 = vfmaq_lane_f32(vaccCDEFn1, viCDEF, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout89ABn0 = vminq_f32(vacc89ABn0, vmax);
float32x4_t voutCDEFn0 = vminq_f32(vaccCDEFn0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout89ABn1 = vminq_f32(vacc89ABn1, vmax);
float32x4_t voutCDEFn1 = vminq_f32(vaccCDEFn1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout89ABn0 = vmaxq_f32(vout89ABn0, vmin);
voutCDEFn0 = vmaxq_f32(voutCDEFn0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout89ABn1 = vmaxq_f32(vout89ABn1, vmin);
voutCDEFn1 = vmaxq_f32(voutCDEFn1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
vst1q_f32(output + 8, vout89ABn0);
vst1q_f32(output + 12, voutCDEFn0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
vst1q_f32(output + 8, vout89ABn1);
vst1q_f32(output + 12, voutCDEFn1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
float32x4_t vacc89AB = vacc0123;
float32x4_t vaccCDEF = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
const float32x4_t vi89AB = vld1q_f32(input + 8);
const float32x4_t viCDEF = vld1q_f32(input + 12);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
vacc89AB = vfmaq_f32(vacc89AB, vi89AB, vw);
vaccCDEF = vfmaq_f32(vaccCDEF, viCDEF, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vout89AB = vmaxq_f32(vout89AB, vmin);
voutCDEF = vmaxq_f32(voutCDEF, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
vst1q_f32(output + 8, vout89AB);
vst1q_f32(output + 12, voutCDEF);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 16;
}
output_decrement += 8 * sizeof(float);
if (mc & (8 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
}
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 24,823 | 40.58124 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neon-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vmla_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vmla_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,641 | 33.385185 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neon-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neon_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,298 | 35.047619 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neon(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,370 | 33.417323 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neonfma-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neonfma_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vfma_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vfma_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,644 | 33.407407 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neonfma-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neonfma_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,301 | 35.068027 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,373 | 33.440945 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-scalar-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
float vi2 = input[2];
float vi3 = input[3];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
float vacc2 = vw;
float vacc3 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
vi2 = input[2];
vi3 = input[3];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,723 | 28.898734 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while (mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,681 | 32.4 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__sse(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
_mm_storeu_ps(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc01 = _mm_load_ss(w); w += 1;
vacc01 = _mm_unpacklo_ps(vacc01, vacc01);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi01 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
__m128 vw = _mm_load_ss(w); w += 1;
vw = _mm_unpacklo_ps(vw, vw);
vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(vi01, vw));
} while (--nnz != 0);
}
__m128 vout01 = _mm_min_ps(vacc01, vmax);
vout01 = _mm_max_ps(vout01, vmin);
_mm_storel_pi((__m64*) output, vout01);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0 = _mm_load_ss(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0 = _mm_load_ss(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load_ss(w); w += 1;
vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(vi0, vw));
} while (--nnz != 0);
}
__m128 vout0 = _mm_min_ss(vacc0, vmax);
vout0 = _mm_max_ss(vout0, vmin);
_mm_store_ss(output, vout0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,087 | 33.644068 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-arm-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,885 | 34.664234 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-arm-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,299 | 33.4 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-arm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
}
v128_t vacc0123 = vacc0123x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,040 | 35.79562 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
}
v128_t vacc0123 = vacc0123x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,912 | 37.647059 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_arm(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,170 | 34.347458 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-x86-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,891 | 34.708029 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-x86-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,305 | 33.448 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-x86-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
}
v128_t vacc0123 = vacc0123x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,046 | 35.839416 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
}
v128_t vacc0123 = vacc0123x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,918 | 37.686275 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x1-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x1__wasmsimd_x86(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 4,176 | 34.398305 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x2-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x2__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vw = vld1_f32(w); w += 2;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 8,134 | 34.679825 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-4x4-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_4x4__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
while XNN_LIKELY(mc >= 4 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_f32(w); w += 4;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc01n0 = vfma_laneq_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_laneq_f32(vacc01n1, vi01, vw, 1);
vacc01n2 = vfma_laneq_f32(vacc01n2, vi01, vw, 2);
vacc01n3 = vfma_laneq_f32(vacc01n3, vi01, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
float32x2_t vout01n2 = vmin_f32(vacc01n2, vget_low_f32(vmax));
float32x2_t vout01n3 = vmin_f32(vacc01n3, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vout01n2 = vmax_f32(vout01n2, vget_low_f32(vmin));
vout01n3 = vmax_f32(vout01n3, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0n0 = vfma_laneq_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_laneq_f32(vacc0n1, vi0, vw, 1);
vacc0n2 = vfma_laneq_f32(vacc0n2, vi0, vw, 2);
vacc0n3 = vfma_laneq_f32(vacc0n3, vi0, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
float32x2_t vout0n2 = vmin_f32(vacc0n2, vget_low_f32(vmax));
float32x2_t vout0n3 = vmin_f32(vacc0n3, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vout0n2 = vmax_f32(vout0n2, vget_low_f32(vmin));
vout0n3 = vmax_f32(vout0n3, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n2, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n3, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 10,219 | 37.712121 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neon-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neon_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vmla_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vmla_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,059 | 34.647059 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neon-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neon_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vmlaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vmlaq_f32(vacc4567x0, vi4567x0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vmlaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vmlaq_f32(vacc4567x1, vi4567x1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,990 | 36.586022 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neon(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vmla_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vmla_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,681 | 34.735849 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neonfma-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neonfma_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float32x4_t vw = vld1q_dup_f32(w); w += 1;
intptr_t diff = *dmap++;
float32x4_t vi0123 = vld1q_f32(input);
float32x4_t vi4567 = vld1q_f32(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vw;
float32x4_t vacc4567 = vw;
vw = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
diff = *dmap++;
vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vi0123 = vld1q_f32(input);
vi4567 = vld1q_f32(input + 4);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vb = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vb);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc01 = vfma_f32(vacc01, vi01, vb);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x2_t vb = vld1_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0 = vfma_f32(vacc0, vi0, vb);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,062 | 34.664706 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neonfma-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neonfma_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123x0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123x1 = vmovq_n_f32(0.0f);
float32x4_t vacc4567x0 = vacc0123x0;
float32x4_t vacc4567x1 = vmovq_n_f32(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float32x4_t vi0123x0 = vld1q_f32(input);
const float32x4_t vi4567x0 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff0);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw0 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x0 = vfmaq_f32(vacc0123x0, vi0123x0, vw0);
vacc4567x0 = vfmaq_f32(vacc4567x0, vi4567x0, vw0);
const float32x4_t vi0123x1 = vld1q_f32(input);
const float32x4_t vi4567x1 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff1);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw1 = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123x1 = vfmaq_f32(vacc0123x1, vi0123x1, vw1);
vacc4567x1 = vfmaq_f32(vacc4567x1, vi4567x1, vw1);
}
float32x4_t vacc0123 = vacc0123x0;
float32x4_t vacc4567 = vacc4567x0;
vacc0123 = vaddq_f32(vacc0123, vacc0123x1);
vacc4567 = vaddq_f32(vacc4567, vacc4567x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,993 | 36.602151 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
const float32x4x2_t vminmax = vld2q_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
const float32x2x2_t vminmax = vld2_dup_f32(¶ms->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,684 | 34.754717 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-scalar-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
float vi2 = input[2];
float vi3 = input[3];
float vi4 = input[4];
float vi5 = input[5];
float vi6 = input[6];
float vi7 = input[7];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
float vacc2 = vw;
float vacc3 = vw;
float vacc4 = vw;
float vacc5 = vw;
float vacc6 = vw;
float vacc7 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
vi2 = input[2];
vi3 = input[3];
vi4 = input[4];
vi5 = input[5];
vi6 = input[6];
vi7 = input[7];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
float vi2 = input[2];
float vi3 = input[3];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
float vacc2 = vw;
float vacc3 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
vi2 = input[2];
vi3 = input[3];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
float vi1 = input[1];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
float vacc1 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
vi1 = input[1];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
float vi0 = input[0];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float vacc0 = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
vacc0 += vi0 * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
vi0 = input[0];
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 7,277 | 29.579832 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while (mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
float vacc4x0 = vacc0x0;
float vacc5x0 = vacc0x0;
float vacc6x0 = vacc0x0;
float vacc7x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc4x0 += vi4 * vw0;
vacc5x0 += vi5 * vw0;
vacc6x0 += vi6 * vw0;
vacc7x0 += vi7 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout4x0 = math_min_f32(vacc4x0, vmax);
float vout5x0 = math_min_f32(vacc5x0, vmax);
float vout6x0 = math_min_f32(vacc6x0, vmax);
float vout7x0 = math_min_f32(vacc7x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout4x0 = math_max_f32(vout4x0, vmin);
vout5x0 = math_max_f32(vout5x0, vmin);
vout6x0 = math_max_f32(vout6x0, vmin);
vout7x0 = math_max_f32(vout7x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
float vacc4 = vacc0;
float vacc5 = vacc0;
float vacc6 = vacc0;
float vacc7 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 12,616 | 33.567123 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__sse(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
__m128 vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vi4567, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
__m128 vout4567 = _mm_min_ps(vacc4567, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
vout4567 = _mm_max_ps(vout4567, vmin);
_mm_storeu_ps(output, vout0123);
_mm_storeu_ps(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0123 = _mm_load1_ps(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0123 = _mm_loadu_ps(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load1_ps(w); w += 1;
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vi0123, vw));
} while (--nnz != 0);
}
__m128 vout0123 = _mm_min_ps(vacc0123, vmax);
vout0123 = _mm_max_ps(vout0123, vmin);
_mm_storeu_ps(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc01 = _mm_load_ss(w); w += 1;
vacc01 = _mm_unpacklo_ps(vacc01, vacc01);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi01 = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
__m128 vw = _mm_load_ss(w); w += 1;
vw = _mm_unpacklo_ps(vw, vw);
vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(vi01, vw));
} while (--nnz != 0);
}
__m128 vout01 = _mm_min_ps(vacc01, vmax);
vout01 = _mm_max_ps(vout01, vmin);
_mm_storel_pi((__m64*) output, vout01);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
__m128 vacc0 = _mm_load_ss(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const __m128 vi0 = _mm_load_ss(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const __m128 vw = _mm_load_ss(w); w += 1;
vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(vi0, vw));
} while (--nnz != 0);
}
__m128 vout0 = _mm_min_ss(vacc0, vmax);
vout0 = _mm_max_ss(vout0, vmin);
_mm_store_ss(output, vout0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,429 | 35.2 | 87 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.