repo (string, lengths 1–152, nullable ⌀) | file (string, lengths 14–221) | code (string, lengths 501–25k) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
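// Weight layout note: each 4-channel tile packs 4 bias values followed by
// 4 values per kernel tap (20 floats per tile, hence w += 20 below). The two
// accumulators vacc0123p0/vacc0123p1 shorten the dependency chain and are
// summed before the min/max clamp.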
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
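/*
 * Illustrative only (not part of the original generated file): a scalar C
 * reference for what the 4p4c kernels in this family compute per output
 * pixel, assuming the 20-floats-per-4-channel-tile weight packing noted
 * above. The function name and signature below are hypothetical.
 */
#include <stddef.h>
static void dwconv_4p4c_reference(
    size_t channels,
    const float* i[4],   // one input pointer per kernel tap
    const float* w,      // tile-packed bias + tap weights
    float* out,
    float vmin, float vmax)
{
  for (size_t c = 0; c < channels; c++) {
    const size_t tile = c / 4;
    const size_t lane = c % 4;
    const float* wt = w + tile * 20;
    float acc = wt[lane];  // bias for this channel
    for (size_t k = 0; k < 4; k++) {
      acc += i[k][c] * wt[4 + 4 * k + lane];  // tap k weight for this channel
    }
    const float clamped = acc < vmin ? vmin : acc;  // max(acc, vmin)
    out[c] = clamped > vmax ? vmax : clamped;       // min(..., vmax)
  }
}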
| 4,252 | 29.818841 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
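// Same 20-float-per-tile weight packing as the acc2 variant above, but with
// a single accumulator chain. The "arm" suffix reflects the use of full
// wasm_f32x4_min/max for clamping, which lowers well on ARM targets.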
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 20;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,077 | 29.432836 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
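// x86-flavored clamping: wasm_f32x4_pmin/pmax implement the pseudo-min/max
// (b < a ? b : a), which map directly onto SSE minps/maxps.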
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,256 | 29.847826 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
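// Single-accumulator x86 variant; same weight packing, with pmin/pmax for
// the output clamp.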
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 20;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,081 | 29.462687 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_4p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
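// Relaxed-SIMD variant: __builtin_wasm_relaxed_madd_f32x4 may lower to a
// native fused multiply-add where the hardware provides one. This is a
// plain (non-minmax) kernel, so the accumulator is stored unclamped.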
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
w += 20;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,829 | 28.689922 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_4p4c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
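// Baseline non-minmax wasmsimd variant: multiply-add is expressed as
// separate wasm_f32x4_mul / wasm_f32x4_add, and results are stored unclamped.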
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 20;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,794 | 28.418605 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
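// Weight layout note: each 8-channel tile packs 8 bias values followed by
// 8 values per tap (40 floats per tile, hence w += 40 below). The 1-7
// channel remainder uses _mm256_maskload_ps with a mask from avx.mask_table.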
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,824 | 31.823129 | 96 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-avx.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
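// Single-accumulator AVX variant; same 40-float-per-tile packing, using
// separate _mm256_mul_ps / _mm256_add_ps rather than FMA.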
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,625 | 31.34965 | 96 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-fma3-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
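// FMA3 variant: _mm256_fmadd_ps fuses multiply and add with a single
// rounding; the dual accumulators are summed before clamping.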
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,747 | 31.29932 | 96 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-fma3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
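// Single-accumulator FMA3 variant; layout and remainder handling match the
// AVX kernels above.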
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,522 | 30.629371 | 96 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
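// NEON variant: 8-channel main loop, 4-channel remainder loop, then a
// sub-4 tail with partial stores. vmlaq_f32 is NEON multiply-accumulate;
// the acc2 scheme keeps two accumulators per vector and sums them at the end.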
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
// Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,049 | 34.174419 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
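// Single-accumulator NEON variant: one vmlaq_f32 chain per 4-channel
// vector, clamped with vmaxq_f32 then vminq_f32.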
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,683 | 33.448485 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
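// NEON-FMA variant: vfmaq_f32 performs a fused multiply-add (single
// rounding), combined with the dual-accumulator (acc2) scheme.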
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
// Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,052 | 34.19186 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
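// Single-accumulator fused-multiply-add (vfmaq_f32) variant of the NEON
// kernel above.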
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
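    // 4-channel remainder: the per-tap stride in w stays 8 floats, so this
    // loop reads the lower half of each group and advances w by only 4,
    // leaving the upper half for a second pass.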
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,686 | 33.466667 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
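      // acc2 variant: taps 1 and 3 feed a second accumulator pair, breaking
      // the serial dependency chain; the partial sums are combined below.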
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
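      // Advance w by only 4: the per-tap stride remains 8 floats, so another
      // 4-channel pass would pick up the upper halves of the same groups.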
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
      // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
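      // Partial store: _mm_storel_pi writes the low two lanes, then
      // _mm_movehl_ps moves the high pair down for the final scalar store.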
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,099 | 30.770833 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
w += 40;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
w += 4;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,789 | 30.297297 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmrelaxedsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
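    // Same packed layout as the other 8-channel-tile kernels: 8 bias floats,
    // then 8 floats per tap (40 floats consumed per main-loop iteration).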
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
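      // Relaxed min/max give implementation-defined results for NaN and
      // signed-zero inputs, which permits a single-instruction lowering.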
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,581 | 33.103627 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmrelaxedsimd-fma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
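      // relaxed_madd is permitted, but not required, to fuse the multiply-add
      // with a single rounding, hence the "fma" suffix on this variant.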
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
vacc4567p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p1);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,621 | 33.310881 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p0);
w += 40;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,323 | 33 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
w += 40;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,271 | 32.72043 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmsimd-arm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
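      // Partial store: a 64-bit lane store covers two channels, then the
      // upper pair is shuffled down for the optional final 32-bit store.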
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,434 | 32.341969 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
w += 40;
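      // This "_arm" variant clamps with the IEEE-compliant f32x4 min/max,
      // which presumably lower to single instructions on ARM targets.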
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,124 | 31.930108 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmsimd-x86-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,442 | 32.38342 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p8c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
w += 40;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
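/*
 * Editorial note (illustrative): the _x86 suffix of this kernel clamps with
 * wasm_f32x4_pmax/pmin, whose per-lane semantics match SSE MAXPS/MINPS rather
 * than IEEE min/max: pmax(a, b) yields b only when a < b, and pmin(a, b)
 * yields b only when b < a. A scalar model of one clamped lane, written as a
 * hypothetical helper:
 */
static inline float clamp_pseudo_lane_sketch(float vmin, float vmax, float acc) {
  float v = (vmin < acc) ? acc : vmin;  // pmax(vmin, acc)
  return (v < vmax) ? v : vmax;         // pmin(vmax, v)
}
// Consequence: a NaN accumulator compares false in both tests and collapses to
// vmin here, whereas the _arm variant's wasm_f32x4_max/min would propagate NaN.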
| 6,132 | 31.973118 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_4p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p0);
w += 40;
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
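/*
 * Editorial note (illustrative): __builtin_wasm_relaxed_madd_f32x4(a, b, c) is
 * the relaxed-SIMD multiply-add; an engine may lower it either as a true fused
 * multiply-add (one rounding) or as separate mul and add (two roundings), so
 * results can differ by one ulp across platforms. Scalar models of the two
 * permitted lowerings:
 */
#include <math.h>  // for fmaf(); not part of the generated file
static inline float relaxed_madd_fused_sketch(float a, float b, float c) {
  return fmaf(a, b, c);  // single-rounding lowering
}
static inline float relaxed_madd_split_sketch(float a, float b, float c) {
  return a * b + c;      // mul-then-add lowering (two roundings)
}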
| 5,770 | 31.240223 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p8c-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_4p8c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
w += 40;
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
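/*
 * Editorial sketch (layout inferred from the offsets above): this 4p8c kernel
 * packs weights per 8-channel group as 8 bias values followed by 8 values for
 * each of the 4 taps, i.e. 40 floats per group. That is why the c >= 4
 * subtile loop reads taps at w+8, w+16, w+24, w+32 yet advances w by only 4:
 * the next iteration lands on the second half of the same group. Hypothetical
 * index helper for one weight within a group:
 */
static inline const float* dwconv_4p8c_weight_sketch(const float* w_group, int tap, int ch) {
  // ch in [0, 8): bias lives at w_group[ch], tap t at w_group[8 * (t + 1) + ch].
  return &w_group[8 * (tap + 1) + ch];
}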
| 5,711 | 30.910615 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c16s1r-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c16s1r__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
b += 16;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
__m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 15);
__m512 vaccp0 = _mm512_load_ps(b);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
__m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
output += c;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
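/*
 * Editorial sketch (not generated output): the multipass structure above seeds
 * buffer[] with bias plus the first 5 taps, folds 5 more taps into buffer[] in
 * each middle pass, then adds the remaining taps, clamps, and stores in the
 * last pass. Collapsed to one scalar channel (hypothetical helper; the real
 * kernel vectorizes 16 channels and splits the sum across two accumulators):
 */
static inline float dwconv_channel_sketch(float bias, const float* const* rows,
                                          const float* taps, size_t kernel_size,
                                          size_t ch, float vmin, float vmax) {
  float acc = bias;                         // "first pass" seed
  for (size_t k = 0; k < kernel_size; k++) {
    acc += rows[k][ch] * taps[k];           // first + middle + last taps
  }
  acc = acc < vmin ? vmin : acc;            // _mm512_max_ps(vmin, acc)
  return acc > vmax ? vmax : acc;           // _mm512_min_ps(vmax, acc)
}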
| 11,560 | 27.758706 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c16s1r-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c16s1r__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
_mm512_store_ps(b, vaccp0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
_mm512_store_ps(b, vaccp0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
b += 16;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
__m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 15);
__m512 vaccp0 = _mm512_load_ps(b);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
__m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
output += c;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
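/*
 * Editorial note (illustrative): the remainder mask built above is simply the
 * low c bits set, ((1u << c) - 1), selecting lanes 0..c-1 for the masked loads
 * and the masked store; e.g. c = 5 gives 0x001F. The bit math as a standalone
 * helper (valid for 1 <= c <= 15; <stdint.h> is already included here):
 */
static inline uint16_t remainder_lane_mask_sketch(uint32_t c) {
  return (uint16_t) ((UINT32_C(1) << c) - UINT32_C(1));
}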
| 11,015 | 27.246154 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c4s4r-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
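// Editorial note: round_up_po2(channels, 4) == (channels + 3) & ~(size_t) 3, so
// the channel count is padded to a whole number of 4-wide vectors. The packed
// weights are padded to match, and the XNN_OOB_READS annotation on this
// function records that the resulting over-read past `channels` on the input
// rows is intentional here.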
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
__m128 vacc89ABp0 = _mm_load_ps(w + 8);
__m128 vaccCDEFp0 = _mm_load_ps(w + 12);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
const __m128 vk0x0123 = _mm_load_ps(w + 16);
const __m128 vk0x4567 = _mm_load_ps(w + 20);
const __m128 vk0x89AB = _mm_load_ps(w + 24);
const __m128 vk0xCDEF = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
const __m128 vk1x0123 = _mm_load_ps(w + 32);
const __m128 vk1x4567 = _mm_load_ps(w + 36);
const __m128 vk1x89AB = _mm_load_ps(w + 40);
const __m128 vk1xCDEF = _mm_load_ps(w + 44);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
__m128 vacc89ABp1 = _mm_mul_ps(vi1x89AB, vk1x89AB);
__m128 vaccCDEFp1 = _mm_mul_ps(vi1xCDEF, vk1xCDEF);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
const __m128 vk2x0123 = _mm_load_ps(w + 48);
const __m128 vk2x4567 = _mm_load_ps(w + 52);
const __m128 vk2x89AB = _mm_load_ps(w + 56);
const __m128 vk2xCDEF = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
const __m128 vk3x0123 = _mm_load_ps(w + 64);
const __m128 vk3x4567 = _mm_load_ps(w + 68);
const __m128 vk3x89AB = _mm_load_ps(w + 72);
const __m128 vk3xCDEF = _mm_load_ps(w + 76);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp1 = _mm_add_ps(vacc89ABp1, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp1 = _mm_add_ps(vaccCDEFp1, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
const __m128 vk4x0123 = _mm_load_ps(w + 80);
const __m128 vk4x4567 = _mm_load_ps(w + 84);
const __m128 vk4x89AB = _mm_load_ps(w + 88);
const __m128 vk4xCDEF = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 96;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
vacc89ABp0 = _mm_add_ps(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, vaccCDEFp1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
_mm_store_ps(b + 8, vacc89ABp0);
_mm_store_ps(b + 12, vaccCDEFp0);
b += 16;
}
for (; c != 0; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
__m128 vacc89ABp0 = _mm_load_ps(b + 8);
__m128 vaccCDEFp0 = _mm_load_ps(b + 12);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
const __m128 vk0x89AB = _mm_load_ps(w + 8);
const __m128 vk0xCDEF = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
const __m128 vk1x89AB = _mm_load_ps(w + 24);
const __m128 vk1xCDEF = _mm_load_ps(w + 28);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
__m128 vacc89ABp1 = _mm_mul_ps(vi1x89AB, vk1x89AB);
__m128 vaccCDEFp1 = _mm_mul_ps(vi1xCDEF, vk1xCDEF);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
const __m128 vk2x0123 = _mm_load_ps(w + 32);
const __m128 vk2x4567 = _mm_load_ps(w + 36);
const __m128 vk2x89AB = _mm_load_ps(w + 40);
const __m128 vk2xCDEF = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
const __m128 vk3x0123 = _mm_load_ps(w + 48);
const __m128 vk3x4567 = _mm_load_ps(w + 52);
const __m128 vk3x89AB = _mm_load_ps(w + 56);
const __m128 vk3xCDEF = _mm_load_ps(w + 60);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp1 = _mm_add_ps(vacc89ABp1, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp1 = _mm_add_ps(vaccCDEFp1, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
const __m128 vk4x0123 = _mm_load_ps(w + 64);
const __m128 vk4x4567 = _mm_load_ps(w + 68);
const __m128 vk4x89AB = _mm_load_ps(w + 72);
const __m128 vk4xCDEF = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 80;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
vacc89ABp0 = _mm_add_ps(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, vaccCDEFp1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
_mm_store_ps(b + 8, vacc89ABp0);
_mm_store_ps(b + 12, vaccCDEFp0);
b += 16;
}
for (; c != 0; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
__m128 vacc89ABp0 = _mm_load_ps(b + 8);
__m128 vaccCDEFp0 = _mm_load_ps(b + 12);
b += 16;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
__m128 vk0x89AB = _mm_load_ps(w + 8);
__m128 vk0xCDEF = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
__m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vk1x89AB = _mm_load_ps(w + 24);
__m128 vk1xCDEF = _mm_load_ps(w + 28);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
__m128 vacc89ABp1 = _mm_mul_ps(vi1x89AB, vk1x89AB);
__m128 vaccCDEFp1 = _mm_mul_ps(vi1xCDEF, vk1xCDEF);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
__m128 vk2x0123 = _mm_load_ps(w + 32);
__m128 vk2x4567 = _mm_load_ps(w + 36);
__m128 vk2x89AB = _mm_load_ps(w + 40);
__m128 vk2xCDEF = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
__m128 vk3x0123 = _mm_load_ps(w + 48);
__m128 vk3x4567 = _mm_load_ps(w + 52);
__m128 vk3x89AB = _mm_load_ps(w + 56);
__m128 vk3xCDEF = _mm_load_ps(w + 60);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp1 = _mm_add_ps(vacc89ABp1, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp1 = _mm_add_ps(vaccCDEFp1, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
__m128 vk4x0123 = _mm_load_ps(w + 64);
__m128 vk4x4567 = _mm_load_ps(w + 68);
__m128 vk4x89AB = _mm_load_ps(w + 72);
__m128 vk4xCDEF = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 80;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
vacc89ABp0 = _mm_add_ps(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, vaccCDEFp1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
__m128 vacc89AB = _mm_max_ps(vacc89ABp0, vmin);
__m128 vaccCDEF = _mm_max_ps(vaccCDEFp0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
vacc89AB = _mm_min_ps(vacc89AB, vmax);
vaccCDEF = _mm_min_ps(vaccCDEF, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
_mm_storeu_ps(output + 8, vacc89AB);
_mm_storeu_ps(output + 12, vaccCDEF);
output += 16;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
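/*
 * Editorial sketch (not generated output): the _acc2 suffix means the 5 taps
 * are split across two partial accumulators, p0 (bias plus taps 0, 2, 4) and
 * p1 (taps 1, 3), which are folded once at the end; two shorter add chains
 * expose more independent work per cycle. Scalar shape of the same schedule
 * for one channel:
 */
static inline float acc2_dot5_sketch(float bias, const float x[5], const float k[5]) {
  float p0 = bias + x[0] * k[0];  // taps 0, 2, 4 accumulate into p0
  float p1 =        x[1] * k[1];  // taps 1, 3 accumulate into p1
  p0 += x[2] * k[2];
  p1 += x[3] * k[3];
  p0 += x[4] * k[4];
  return p0 + p1;                 // fold the two accumulators once
}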
| 23,086 | 35.587956 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c4s4r-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
__m128 vacc89ABp0 = _mm_load_ps(w + 8);
__m128 vaccCDEFp0 = _mm_load_ps(w + 12);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
const __m128 vk0x0123 = _mm_load_ps(w + 16);
const __m128 vk0x4567 = _mm_load_ps(w + 20);
const __m128 vk0x89AB = _mm_load_ps(w + 24);
const __m128 vk0xCDEF = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
const __m128 vk1x0123 = _mm_load_ps(w + 32);
const __m128 vk1x4567 = _mm_load_ps(w + 36);
const __m128 vk1x89AB = _mm_load_ps(w + 40);
const __m128 vk1xCDEF = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi1x89AB, vk1x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi1xCDEF, vk1xCDEF));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
const __m128 vk2x0123 = _mm_load_ps(w + 48);
const __m128 vk2x4567 = _mm_load_ps(w + 52);
const __m128 vk2x89AB = _mm_load_ps(w + 56);
const __m128 vk2xCDEF = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
const __m128 vk3x0123 = _mm_load_ps(w + 64);
const __m128 vk3x4567 = _mm_load_ps(w + 68);
const __m128 vk3x89AB = _mm_load_ps(w + 72);
const __m128 vk3xCDEF = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
const __m128 vk4x0123 = _mm_load_ps(w + 80);
const __m128 vk4x4567 = _mm_load_ps(w + 84);
const __m128 vk4x89AB = _mm_load_ps(w + 88);
const __m128 vk4xCDEF = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 96;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
_mm_store_ps(b + 8, vacc89ABp0);
_mm_store_ps(b + 12, vaccCDEFp0);
b += 16;
}
for (; c != 0; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
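// Each middle pass resumes the partial sums from the buffer, folds in five
// more kernel taps, and writes the sums back. The loop condition keeps
// between one and five taps in reserve for the clamping last pass below.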
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
__m128 vacc89ABp0 = _mm_load_ps(b + 8);
__m128 vaccCDEFp0 = _mm_load_ps(b + 12);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
const __m128 vk0x89AB = _mm_load_ps(w + 8);
const __m128 vk0xCDEF = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
const __m128 vk1x89AB = _mm_load_ps(w + 24);
const __m128 vk1xCDEF = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi1x89AB, vk1x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi1xCDEF, vk1xCDEF));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
const __m128 vk2x0123 = _mm_load_ps(w + 32);
const __m128 vk2x4567 = _mm_load_ps(w + 36);
const __m128 vk2x89AB = _mm_load_ps(w + 40);
const __m128 vk2xCDEF = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
const __m128 vk3x0123 = _mm_load_ps(w + 48);
const __m128 vk3x4567 = _mm_load_ps(w + 52);
const __m128 vk3x89AB = _mm_load_ps(w + 56);
const __m128 vk3xCDEF = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
const __m128 vk4x0123 = _mm_load_ps(w + 64);
const __m128 vk4x4567 = _mm_load_ps(w + 68);
const __m128 vk4x89AB = _mm_load_ps(w + 72);
const __m128 vk4xCDEF = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 80;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
_mm_store_ps(b + 8, vacc89ABp0);
_mm_store_ps(b + 12, vaccCDEFp0);
b += 16;
}
for (; c != 0; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
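// Only this pass applies the min/max clamp and writes to the output tensor;
// the earlier passes exchange unclamped partial sums through the buffer.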
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
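// Unlike the buffered passes, the last pass walks the exact channel count,
// since the tail store below must not write past the end of the output row.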
for (; c >= 16; c -= 16) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
__m128 vacc89ABp0 = _mm_load_ps(b + 8);
__m128 vaccCDEFp0 = _mm_load_ps(b + 12);
b += 16;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
const __m128 vi0x89AB = _mm_loadu_ps(i0 + 8);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 12);
i0 += 16;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
__m128 vk0x89AB = _mm_load_ps(w + 8);
__m128 vk0xCDEF = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi0x89AB, vk0x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi0xCDEF, vk0xCDEF));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1 + 8);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 12);
i1 += 16;
__m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vk1x89AB = _mm_load_ps(w + 24);
__m128 vk1xCDEF = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi1x89AB, vk1x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi1xCDEF, vk1xCDEF));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2 + 8);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 12);
i2 += 16;
__m128 vk2x0123 = _mm_load_ps(w + 32);
__m128 vk2x4567 = _mm_load_ps(w + 36);
__m128 vk2x89AB = _mm_load_ps(w + 40);
__m128 vk2xCDEF = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi2x89AB, vk2x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi2xCDEF, vk2xCDEF));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
const __m128 vi3x89AB = _mm_loadu_ps(i3 + 8);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 12);
i3 += 16;
__m128 vk3x0123 = _mm_load_ps(w + 48);
__m128 vk3x4567 = _mm_load_ps(w + 52);
__m128 vk3x89AB = _mm_load_ps(w + 56);
__m128 vk3xCDEF = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi3x89AB, vk3x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi3xCDEF, vk3xCDEF));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
const __m128 vi4x89AB = _mm_loadu_ps(i4 + 8);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 12);
i4 += 16;
__m128 vk4x0123 = _mm_load_ps(w + 64);
__m128 vk4x4567 = _mm_load_ps(w + 68);
__m128 vk4x89AB = _mm_load_ps(w + 72);
__m128 vk4xCDEF = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
vacc89ABp0 = _mm_add_ps(vacc89ABp0, _mm_mul_ps(vi4x89AB, vk4x89AB));
vaccCDEFp0 = _mm_add_ps(vaccCDEFp0, _mm_mul_ps(vi4xCDEF, vk4xCDEF));
w += 80;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
__m128 vacc89AB = _mm_max_ps(vacc89ABp0, vmin);
__m128 vaccCDEF = _mm_max_ps(vaccCDEFp0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
vacc89AB = _mm_min_ps(vacc89AB, vmax);
vaccCDEF = _mm_min_ps(vaccCDEF, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
_mm_storeu_ps(output + 8, vacc89AB);
_mm_storeu_ps(output + 12, vaccCDEF);
output += 16;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
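// Store the final 1-3 channels piecewise: the low 64-bit half first, then
// shift the upper pair down with MOVHLPS, then a single scalar lane.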
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,050 | 35.268092 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c8s4r-minmax-avx-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c8s4r__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
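// The _acc2 suffix denotes two independent accumulator chains (the p0/p1
// temporaries below); splitting the sum this way is presumably meant to
// shorten the add-latency dependency chain, at the cost of one extra add
// per block to merge the accumulators before each store.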
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 96;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
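// mask_table is assumed to hold seven -1 words followed by seven 0 words,
// so indexing at [7 - c] yields a mask whose first c lanes are active for
// the AVX masked loads below.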
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 32);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 48);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 64);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
b += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
__m256 vk0x01234567 = _mm256_load_ps(w);
__m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
__m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
__m256 vk2x01234567 = _mm256_load_ps(w + 32);
__m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
__m256 vk3x01234567 = _mm256_load_ps(w + 48);
__m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
__m256 vk4x01234567 = _mm256_load_ps(w + 64);
__m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,188 | 35.495066 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c8s4r-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c8s4r__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
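// This kernel mirrors the _acc2 variant above but keeps a single
// accumulator chain: every tap is folded in with mul+add on the same
// register, so no merging step is needed before each store.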
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 96;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 32);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 48);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 64);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 80;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
b += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
__m256 vk0x01234567 = _mm256_load_ps(w);
__m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
__m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
__m256 vk2x01234567 = _mm256_load_ps(w + 32);
__m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
__m256 vk3x01234567 = _mm256_load_ps(w + 48);
__m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
__m256 vk4x01234567 = _mm256_load_ps(w + 64);
__m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
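// Up to seven trailing channels are stored in a 4/2/1 cascade: the low
// 128-bit half first, then the upper half is pulled down via EXTRACTF128
// and MOVHLPS for the remaining pair and the final scalar lane.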
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,130 | 34.998296 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c8s4r-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c8s4r__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
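// The fma3 variant fuses each multiply-add into a single _mm256_fmadd_ps,
// which rounds once per tap instead of twice; results can therefore differ
// from the plain AVX kernels in the low-order bits.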
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 96;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 32);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 48);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 64);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
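    // Only this pass applies the min/max clamp and writes to 'output'; the
    // earlier passes keep unclamped partial sums staged in 'buffer'.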
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
b += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
__m256 vk0x01234567 = _mm256_load_ps(w);
__m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
__m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
__m256 vk2x01234567 = _mm256_load_ps(w + 32);
__m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
__m256 vk3x01234567 = _mm256_load_ps(w + 48);
__m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
__m256 vk4x01234567 = _mm256_load_ps(w + 64);
__m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
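        // Store the 1-7 remaining channels with progressively narrower
        // stores: 4 floats, then 2, then 1, stepping through the register.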
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,565 | 34.470395 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l16c8s4r-minmax-fma3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
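//
// Note: by the microkernel naming convention, 5f5m5l16c8s4r appears to encode
// a first pass of 5 kernel taps, middle passes of 5 taps, a last pass of up
// to 5 taps, a 16-channel tile with an 8-channel subtile, and channel counts
// rounded up to a multiple of 4.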
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l16c8s4r__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
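    // The first pass seeds 'buffer' with the bias plus the first five taps;
    // the packed weights start with the bias for each channel tile.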
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 96;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
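    // With kernel_size taps in total, the first pass consumed 5, this loop
    // runs while more than 5 remain (e.g. kernel_size 25: three middle
    // passes), and the last pass below handles the final 1-5 taps.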
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 32);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 48);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 64);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 80;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
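    // The last pass consumes the buffered partial sums, applies the min/max
    // clamp, and writes the final values to 'output'.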
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
b += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
__m256 vk0x01234567 = _mm256_load_ps(w);
__m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
__m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
__m256 vk2x01234567 = _mm256_load_ps(w + 32);
__m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
__m256 vk3x01234567 = _mm256_load_ps(w + 48);
__m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
__m256 vk4x01234567 = _mm256_load_ps(w + 64);
__m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,351 | 33.67121 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-minmax-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
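// The _acc2 variant keeps two running sums (vacc0p0 and vacc0p1) per channel
// and merges them once per channel, shortening the floating-point dependency
// chain relative to a single accumulator.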
void xnn_f32_dwconv_minmax_ukernel_5f5m5l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,580 | 28.913636 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
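// math_muladd_f32(a, b, c) computes a*b + c; depending on the target it may
// or may not compile to a fused multiply-add, so results can differ in the
// last ulp across builds.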
void xnn_f32_dwconv_minmax_ukernel_5f5m5l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,383 | 28.831776 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-minmax-wasm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
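// This wasm variant differs from the plain scalar _acc2 kernel only in the
// clamp: __builtin_wasm_max_f32/__builtin_wasm_min_f32 map directly to the
// WebAssembly f32.max/f32.min instructions.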
void xnn_f32_dwconv_minmax_ukernel_5f5m5l1c1s1r__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,598 | 28.995455 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
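// Single-accumulator wasm variant: the accumulation matches the scalar
// kernel, with the final clamp done via the WebAssembly min/max builtins.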
void xnn_f32_dwconv_minmax_ukernel_5f5m5l1c1s1r__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,401 | 28.915888 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
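// This variant takes xnn_f32_default_params and applies no output clamping:
// the last pass writes the raw accumulated sums straight to 'output'.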
void xnn_f32_dwconv_ukernel_5f5m5l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,400 | 28.634259 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l1c1s1r-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
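// Unclamped single-accumulator scalar variant; the structure mirrors the
// kernels above (first/middle/last passes staged through 'buffer').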
void xnn_f32_dwconv_ukernel_5f5m5l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
*b++ = vacc0p0;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
w += 5;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
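The scalar kernel above leans entirely on math_muladd_f32 from <xnnpack/math.h>. As a rough mental model only, a hypothetical stand-in is sketched below; this is an assumption about its behavior, not the library's actual definition, which may differ per target:

#include <math.h>

/* Hypothetical stand-in for math_muladd_f32 (assumption, not XNNPACK's real
 * definition): returns x * y + acc, fused into a single rounding when the
 * C library advertises a fast fmaf. */
static inline float sketch_muladd_f32(float x, float y, float acc) {
#ifdef FP_FAST_FMAF
  return fmaf(x, y, acc);  /* one rounding */
#else
  return x * y + acc;      /* two roundings */
#endif
}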
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l32c16s1r-minmax-avx512f-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l32c16s1r__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(w);
__m512 vacc1p0 = _mm512_load_ps(w + 16);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0 = _mm512_load_ps(w + 32);
const __m512 vk0x1 = _mm512_load_ps(w + 48);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0 = _mm512_load_ps(w + 64);
const __m512 vk1x1 = _mm512_load_ps(w + 80);
__m512 vacc0p1 = _mm512_mul_ps(vi1x0, vk1x0);
__m512 vacc1p1 = _mm512_mul_ps(vi1x1, vk1x1);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0 = _mm512_load_ps(w + 96);
const __m512 vk2x1 = _mm512_load_ps(w + 112);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0 = _mm512_load_ps(w + 128);
const __m512 vk3x1 = _mm512_load_ps(w + 144);
vacc0p1 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p1);
vacc1p1 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0 = _mm512_load_ps(w + 160);
const __m512 vk4x1 = _mm512_load_ps(w + 176);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 192;
// Add up all accumulators to vacc0p0
vacc0p0 = _mm512_add_ps(vacc0p0, vacc0p1);
vacc1p0 = _mm512_add_ps(vacc1p0, vacc1p1);
_mm512_store_ps(b, vacc0p0);
_mm512_store_ps(b + 16, vacc1p0);
b += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(b);
__m512 vacc1p0 = _mm512_load_ps(b + 16);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0 = _mm512_load_ps(w);
const __m512 vk0x1 = _mm512_load_ps(w + 16);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
const __m512 vk1x1 = _mm512_load_ps(w + 48);
__m512 vacc0p1 = _mm512_mul_ps(vi1x0, vk1x0);
__m512 vacc1p1 = _mm512_mul_ps(vi1x1, vk1x1);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0 = _mm512_load_ps(w + 64);
const __m512 vk2x1 = _mm512_load_ps(w + 80);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0 = _mm512_load_ps(w + 96);
const __m512 vk3x1 = _mm512_load_ps(w + 112);
vacc0p1 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p1);
vacc1p1 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0 = _mm512_load_ps(w + 128);
const __m512 vk4x1 = _mm512_load_ps(w + 144);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 160;
// Add up all accumulators to vacc0p0
vacc0p0 = _mm512_add_ps(vacc0p0, vacc0p1);
vacc1p0 = _mm512_add_ps(vacc1p0, vacc1p1);
_mm512_store_ps(b, vacc0p0);
_mm512_store_ps(b + 16, vacc1p0);
b += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
_mm512_store_ps(b, vaccp0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(b);
__m512 vacc1p0 = _mm512_load_ps(b + 16);
b += 32;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
__m512 vk0x0 = _mm512_load_ps(w);
__m512 vk0x1 = _mm512_load_ps(w + 16);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
__m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vk1x1 = _mm512_load_ps(w + 48);
__m512 vacc0p1 = _mm512_mul_ps(vi1x0, vk1x0);
__m512 vacc1p1 = _mm512_mul_ps(vi1x1, vk1x1);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
__m512 vk2x0 = _mm512_load_ps(w + 64);
__m512 vk2x1 = _mm512_load_ps(w + 80);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
__m512 vk3x0 = _mm512_load_ps(w + 96);
__m512 vk3x1 = _mm512_load_ps(w + 112);
vacc0p1 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p1);
vacc1p1 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
__m512 vk4x0 = _mm512_load_ps(w + 128);
__m512 vk4x1 = _mm512_load_ps(w + 144);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 160;
// Add up all accumulators to vacc0p0
vacc0p0 = _mm512_add_ps(vacc0p0, vacc0p1);
vacc1p0 = _mm512_add_ps(vacc1p0, vacc1p1);
__m512 vacc0 = _mm512_max_ps(vmin, vacc0p0);
__m512 vacc1 = _mm512_max_ps(vmin, vacc1p0);
vacc0 = _mm512_min_ps(vmax, vacc0);
vacc1 = _mm512_min_ps(vmax, vacc1);
_mm512_storeu_ps(output, vacc0);
_mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
b += 16;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
__m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 15);
__m512 vaccp0 = _mm512_load_ps(b);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
__m512 vk1x0 = _mm512_load_ps(w + 16);
__m512 vaccp1 = _mm512_mul_ps(vi1x0, vk1x0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp1 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp1);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
// Add up all accumulators to vaccp0
vaccp0 = _mm512_add_ps(vaccp0, vaccp1);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
output += c;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
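The remainder handling in the AVX-512 kernel above hinges on building a __mmask16 with the low c bits set, so that _mm512_maskz_loadu_ps reads exactly c floats (zeroing the other lanes) and _mm512_mask_storeu_ps writes exactly c floats back. A standalone demonstration of the mask construction (not part of XNNPACK; compile with -mavx512f):

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

/* Prints the per-remainder masks used by the kernel above: for c leftover
 * channels (1..15), exactly the low c bits are set. */
int main(void) {
  for (uint32_t c = 1; c <= 15; c++) {
    const __mmask16 vmask =
        _cvtu32_mask16((uint16_t) ((UINT32_C(1) << c) - UINT32_C(1)));
    printf("c=%2u mask=0x%04x\n", (unsigned) c, (unsigned) vmask);
  }
  return 0;
}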
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l32c16s1r-minmax-avx512f.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l32c16s1r__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(w);
__m512 vacc1p0 = _mm512_load_ps(w + 16);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0 = _mm512_load_ps(w + 32);
const __m512 vk0x1 = _mm512_load_ps(w + 48);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0 = _mm512_load_ps(w + 64);
const __m512 vk1x1 = _mm512_load_ps(w + 80);
vacc0p0 = _mm512_fmadd_ps(vi1x0, vk1x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi1x1, vk1x1, vacc1p0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0 = _mm512_load_ps(w + 96);
const __m512 vk2x1 = _mm512_load_ps(w + 112);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0 = _mm512_load_ps(w + 128);
const __m512 vk3x1 = _mm512_load_ps(w + 144);
vacc0p0 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0 = _mm512_load_ps(w + 160);
const __m512 vk4x1 = _mm512_load_ps(w + 176);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 192;
_mm512_store_ps(b, vacc0p0);
_mm512_store_ps(b + 16, vacc1p0);
b += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(w);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 80);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 96;
_mm512_store_ps(b, vaccp0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(b);
__m512 vacc1p0 = _mm512_load_ps(b + 16);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0 = _mm512_load_ps(w);
const __m512 vk0x1 = _mm512_load_ps(w + 16);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0 = _mm512_load_ps(w + 32);
const __m512 vk1x1 = _mm512_load_ps(w + 48);
vacc0p0 = _mm512_fmadd_ps(vi1x0, vk1x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi1x1, vk1x1, vacc1p0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0 = _mm512_load_ps(w + 64);
const __m512 vk2x1 = _mm512_load_ps(w + 80);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0 = _mm512_load_ps(w + 96);
const __m512 vk3x1 = _mm512_load_ps(w + 112);
vacc0p0 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0 = _mm512_load_ps(w + 128);
const __m512 vk4x1 = _mm512_load_ps(w + 144);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 160;
_mm512_store_ps(b, vacc0p0);
_mm512_store_ps(b + 16, vacc1p0);
b += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
_mm512_store_ps(b, vaccp0);
b += 16;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 15);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vaccp0 = _mm512_load_ps(b);
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
_mm512_store_ps(b, vaccp0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 32; c -= 32) {
__m512 vacc0p0 = _mm512_load_ps(b);
__m512 vacc1p0 = _mm512_load_ps(b + 16);
b += 32;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
const __m512 vi0x1 = _mm512_loadu_ps(i0 + 16);
i0 += 32;
__m512 vk0x0 = _mm512_load_ps(w);
__m512 vk0x1 = _mm512_load_ps(w + 16);
vacc0p0 = _mm512_fmadd_ps(vi0x0, vk0x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi0x1, vk0x1, vacc1p0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
const __m512 vi1x1 = _mm512_loadu_ps(i1 + 16);
i1 += 32;
__m512 vk1x0 = _mm512_load_ps(w + 32);
__m512 vk1x1 = _mm512_load_ps(w + 48);
vacc0p0 = _mm512_fmadd_ps(vi1x0, vk1x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi1x1, vk1x1, vacc1p0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
const __m512 vi2x1 = _mm512_loadu_ps(i2 + 16);
i2 += 32;
__m512 vk2x0 = _mm512_load_ps(w + 64);
__m512 vk2x1 = _mm512_load_ps(w + 80);
vacc0p0 = _mm512_fmadd_ps(vi2x0, vk2x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi2x1, vk2x1, vacc1p0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
const __m512 vi3x1 = _mm512_loadu_ps(i3 + 16);
i3 += 32;
__m512 vk3x0 = _mm512_load_ps(w + 96);
__m512 vk3x1 = _mm512_load_ps(w + 112);
vacc0p0 = _mm512_fmadd_ps(vi3x0, vk3x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi3x1, vk3x1, vacc1p0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
const __m512 vi4x1 = _mm512_loadu_ps(i4 + 16);
i4 += 32;
__m512 vk4x0 = _mm512_load_ps(w + 128);
__m512 vk4x1 = _mm512_load_ps(w + 144);
vacc0p0 = _mm512_fmadd_ps(vi4x0, vk4x0, vacc0p0);
vacc1p0 = _mm512_fmadd_ps(vi4x1, vk4x1, vacc1p0);
w += 160;
__m512 vacc0 = _mm512_max_ps(vmin, vacc0p0);
__m512 vacc1 = _mm512_max_ps(vmin, vacc1p0);
vacc0 = _mm512_min_ps(vmax, vacc0);
vacc1 = _mm512_min_ps(vmax, vacc1);
_mm512_storeu_ps(output, vacc0);
_mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vaccp0 = _mm512_load_ps(b);
b += 16;
const __m512 vi0x0 = _mm512_loadu_ps(i0);
i0 += 16;
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_loadu_ps(i1);
i1 += 16;
__m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_loadu_ps(i2);
i2 += 16;
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_loadu_ps(i3);
i3 += 16;
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_loadu_ps(i4);
i4 += 16;
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
w += 80;
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 15);
__m512 vaccp0 = _mm512_load_ps(b);
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
const __m512 vi0x0 = _mm512_maskz_loadu_ps(vmask, i0);
__m512 vk0x0 = _mm512_load_ps(w);
vaccp0 = _mm512_fmadd_ps(vi0x0, vk0x0, vaccp0);
const __m512 vi1x0 = _mm512_maskz_loadu_ps(vmask, i1);
__m512 vk1x0 = _mm512_load_ps(w + 16);
vaccp0 = _mm512_fmadd_ps(vi1x0, vk1x0, vaccp0);
const __m512 vi2x0 = _mm512_maskz_loadu_ps(vmask, i2);
__m512 vk2x0 = _mm512_load_ps(w + 32);
vaccp0 = _mm512_fmadd_ps(vi2x0, vk2x0, vaccp0);
const __m512 vi3x0 = _mm512_maskz_loadu_ps(vmask, i3);
__m512 vk3x0 = _mm512_load_ps(w + 48);
vaccp0 = _mm512_fmadd_ps(vi3x0, vk3x0, vaccp0);
const __m512 vi4x0 = _mm512_maskz_loadu_ps(vmask, i4);
__m512 vk4x0 = _mm512_load_ps(w + 64);
vaccp0 = _mm512_fmadd_ps(vi4x0, vk4x0, vaccp0);
__m512 vacc = _mm512_max_ps(vmin, vaccp0);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
output += c;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-neon-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
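The NEON kernel above finishes odd channel counts with a two-step partial store: a 64-bit store for a leftover pair of lanes, then a single-lane store. Pulled out into a standalone helper (illustrative only, not XNNPACK code), the tail looks like this:

#include <stddef.h>
#include <arm_neon.h>

/* Stores the low `c` lanes (1..3) of a float32x4_t, mirroring the
 * (c & 2) / (c & 1) tail in the kernel above. */
static void store_partial_f32x4(float* out, float32x4_t v, size_t c) {
  float32x2_t lo = vget_low_f32(v);
  if (c & 2) {
    vst1_f32(out, lo); out += 2;  /* write lanes 0-1 */
    lo = vget_high_f32(v);        /* lanes 2-3 become the pending pair */
  }
  if (c & 1) {
    vst1_lane_f32(out, lo, 0);    /* write one remaining lane */
  }
}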
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-neon.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
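The only difference between this neon variant and the neonfma variants that follow is the multiply-accumulate intrinsic. A side-by-side sketch (illustrative only; on 32-bit ARM, vfmaq_f32 needs -mfpu=neon-vfpv4, while AArch64 always has it): vmlaq_f32 may lower to a separate multiply and add with two roundings, whereas vfmaq_f32 maps to a fused multiply-add with one rounding.

#include <arm_neon.h>

/* Both compute acc + a*b lane-wise; only the rounding behavior differs. */
float32x4_t mla_example(float32x4_t acc, float32x4_t a, float32x4_t b) {
  return vmlaq_f32(acc, a, b);  /* possibly unfused: two roundings */
}
float32x4_t fma_example(float32x4_t acc, float32x4_t a, float32x4_t b) {
  return vfmaq_f32(acc, a, b);  /* fused: one rounding */
}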
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-neonfma-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
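/*
 * Editor's note: a minimal scalar sketch of what the minmax depthwise-
 * convolution microkernels in this file compute, one output element per
 * channel per output pixel.  The function name and the unpacked
 * bias/kernel layout below are illustrative only -- the real kernels
 * consume a packed weight layout interleaved per 4-channel group.
 */
#include <stddef.h>

static void dwconv_minmax_ref(
    size_t channels,
    size_t kernel_size,
    const float** input,          // kernel_size row pointers, one per tap
    const float* bias,            // [channels]
    const float* const* kernel,   // kernel_size arrays of [channels]
    float* output,
    float min,
    float max)
{
  for (size_t c = 0; c < channels; c++) {
    float acc = bias[c];
    for (size_t k = 0; k < kernel_size; k++) {
      acc += input[k][c] * kernel[k][c];
    }
    // Clamp exactly as the SIMD kernels do: first max(acc, min), then min(acc, max).
    if (acc < min) acc = min;
    if (acc > max) acc = max;
    output[c] = acc;
  }
}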
| 9,297 | 30.09699 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
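    // (The 5f5m5l schedule: this pass folds the bias plus taps 0-4 of every
    //  channel into the scratch `buffer`; each middle pass folds 5 more taps
    //  in place; the last pass folds the remaining taps and applies the
    //  min/max clamp before writing to `output`.)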
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
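      // (This loop rounds `channels` up to a whole vector of 4; the trailing
      //  over-read is sanctioned by XNN_OOB_READS, and the scratch `buffer`
      //  must be sized for whole vectors.  Only the last pass, which writes
      //  to `output`, trims the channel tail exactly.)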
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,872 | 29.491409 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-sse-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
// Add up all accumulators to vacc0123p0
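        // (acc2 variant: the five taps alternate between two accumulators so
        //  the dependent _mm_add_ps chains are roughly half as long and can
        //  overlap in the pipeline; the partial sums are combined here.)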
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
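/*
 * Editor's sketch: the packed-weights accounting implied by the `w` pointer
 * arithmetic above (w += 24 per 4-channel group in the first pass, w += 20
 * per group in each middle and last pass).  Since the last pass always reads
 * five taps' worth of weights, the packing routine must round kernel_size up
 * to a multiple of 5 (padding taps presumably zeroed).  Illustrative only.
 */
#include <stddef.h>

static size_t packed_weight_floats(size_t channels, size_t kernel_size) {
  const size_t groups = (channels + 3) / 4;         // channel tile = 4
  const size_t taps = 5 * ((kernel_size + 4) / 5);  // round up to a multiple of 5
  return groups * 4 * (1 + taps);                   // bias + one weight per tap
}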
| 9,467 | 28.132308 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
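/*
 * Editor's note: remainder-store trace for the tail above with c == 3 and
 * vacc0123 = {a, b, c, d}:
 *   c & 2  ->  _mm_storel_pi writes {a, b};  _mm_movehl_ps shifts the high
 *              half down, leaving vacc0123 = {c, d, c, d};  output += 2
 *   c & 1  ->  _mm_store_ss writes c;  output += 1
 * so exactly three floats are stored and lane d is discarded.
 */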
| 9,106 | 27.728707 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmrelaxedsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
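  // (wasm_v128_load64_splat reads 8 bytes and repeats them across the vector,
  //  so params->wasmsimd.min/max are evidently stored as {value, value} float
  //  pairs; all four lanes end up holding the same clamp bound.)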
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
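/*
 * Editor's note: __builtin_wasm_relaxed_max_f32x4/relaxed_min_f32x4 lower to
 * the relaxed-SIMD f32x4.relaxed_max/min instructions, whose results for NaN
 * and +/-0.0 inputs are implementation-defined.  With finite clamp bounds and
 * ordinary accumulators that looseness is harmless, which is presumably why
 * this variant trades strict IEEE min/max semantics for speed.
 */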
| 9,702 | 28.763804 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmrelaxedsimd-fma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
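        // (f32x4.relaxed_madd may or may not fuse the multiply-add into a
        //  single rounding step depending on the host CPU, so this _fma
        //  variant can differ from the mul+add variant in the last ulp.)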
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,754 | 28.923313 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,438 | 28.68239 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
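        // (Single-accumulator variant: every tap extends the same vacc0p0 add
        //  chain.  Compare the _acc2 twin above, which splits the chain in two
        //  for more instruction-level parallelism at the cost of a register.)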
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
v128_t vacc0 = __builtin_wasm_relaxed_max_f32x4(vacc0p0, vmin);
vacc0 = __builtin_wasm_relaxed_min_f32x4(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,374 | 28.481132 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
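
Reading the `w` increments in this kernel gives the packed-weight footprint per 4-channel group: the first pass consumes 4 bias floats plus 5 taps x 4 channels (`w += 24`), and every later pass consumes 5 taps x 4 channels (`w += 20`), the bias having been folded into the scratch buffer. A sketch of that arithmetic, assuming kernel_size is a multiple of 5 so the last pass sees a full 5 taps (this is not XNNPACK's packing helper):

// Packed floats per 4-channel group, read off the w-pointer increments above.
static size_t weights_per_group(size_t kernel_size) {
  const size_t first_pass = 4 + 5 * 4;                     // bias + 5 taps
  const size_t later_pass = 5 * 4;                         // taps only
  return first_pass + later_pass * (kernel_size / 5 - 1);  // == 4 + 4 * kernel_size
}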
| 9,627 | 28.533742 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
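
For a concrete sense of the calling convention, here is a minimal, hypothetical direct invocation of the kernel above: 8 channels, kernel_size 10, one output pixel, all-zero weights and inputs, so the result is just the zero bias clamped by zeroed params. Every value is invented for illustration; real callers go through XNNPACK's operator API, which packs the weights and builds the indirection list.

#include <string.h>
#include <xnnpack/dwconv.h>

int main(void) {
  enum { C = 8, K = 10 };
  static float zero_row[C];                 // the kernel's "zero" input row
  static float row[C];                      // one real input row, zeroed here
  const float* indirect[K];                 // K input pointers per output pixel
  for (int k = 0; k < K; k++) indirect[k] = row;
  static float packed_w[2 * (4 + 4 * K)];   // two 4-channel groups, zero weights
  static float scratch[C];                  // inter-pass buffer; C is a multiple of 4
  float out[C];
  union xnn_f32_minmax_params params;
  memset(&params, 0, sizeof(params));       // min = max = 0.0f for this sketch
  xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmsimd_arm(
      C, /*output_width=*/1, indirect, packed_w, out,
      /*input_stride=*/0, /*output_increment=*/0, /*input_offset=*/0,
      zero_row, K, scratch, &params);
  return 0;
}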
| 9,299 | 28.245283 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
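
Unlike the `arm` variants, which clamp with the NaN-propagating wasm_f32x4_min/max, the `x86` variants above use pmin/pmax, which lower to a single MINPS/MAXPS. Their lanewise meaning in scalar form, per the WebAssembly SIMD spec (a sketch for reference):

// pmin/pmax return the first operand whenever the comparison is false,
// including when either input is NaN; min/max would return NaN instead.
static float scalar_pmax(float a, float b) { return a < b ? b : a; }
static float scalar_pmin(float a, float b) { return b < a ? b : a; }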
| 9,631 | 28.546012 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l4c4s4r__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
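
Note that the first and middle passes iterate `for (; c < channels; c += 4)` and store a full v128 each time, so the caller-provided `buffer` must be sized to the channel count rounded up to the 4-wide tile (the trailing `4c4s4r` in the kernel name refers to that tiling, on my reading of the naming scheme). A sketch of the implied sizing:

// Scratch floats implied by the store pattern above (an assumption drawn from
// the c-loop stride, not from an XNNPACK sizing API).
static size_t dwconv_scratch_floats(size_t channels) {
  return (channels + 3) & ~(size_t) 3;
}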
| 9,303 | 28.257862 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-wasmrelaxedsimd-fma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
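
The `wasmrelaxedsimd-fma` variants accumulate with __builtin_wasm_relaxed_madd_f32x4, and the relaxed-SIMD spec lets the engine pick either a fused or an unfused evaluation. In scalar terms, the two permitted shapes are the following (a sketch; results may differ in the last bit between engines, which is why these kernels have no bit-exact cross-engine reference):

#include <math.h>

// The two evaluations a relaxed madd may legally produce per lane.
static float madd_fused(float a, float b, float c)   { return fmaf(a, b, c); }  // one rounding
static float madd_unfused(float a, float b, float c) { return a * b + c; }      // two roundings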
| 9,410 | 28.409375 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_5f5m5l4c4s4r__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
w += 20;
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0p0);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
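
The three-phase structure shared by all of these 5f5m5l kernels splits kernel_size as: 5 taps in the first pass, 5 per middle-pass iteration while more than 5 remain, and the final 1-5 taps in the last pass. A small sketch that reproduces the loop bounds above:

// How kernel_size distributes across the passes (mirrors the `ks` loop above).
static void dwconv_5f5m5l_split(size_t kernel_size,
                                size_t* middle_iters, size_t* last_taps) {
  size_t ks = kernel_size - 5;            // taps left after the first pass
  size_t iters = 0;
  while (ks > 5) { ks -= 5; iters += 1; }
  *middle_iters = iters;                  // kernel_size 12 -> 1 middle iteration
  *last_taps = ks;                        // kernel_size 12 -> 2 taps in the last pass
}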
| 9,094 | 28.150641 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-wasmsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_5f5m5l4c4s4r__wasmsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
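
The `_acc2` variants split the sum across two accumulators (`vacc0p0`, `vacc0p1`) that are combined once at the end, shortening the floating-point add dependency chain; the plain variants chain every tap through a single register. A scalar illustration of the same re-association (a sketch; same taps, different grouping, so results can differ in the last bit):

// One-accumulator vs two-accumulator association over 5 taps.
static float dot5_acc1(const float* x, const float* k) {
  float acc = 0.0f;
  for (int t = 0; t < 5; t++) acc += x[t] * k[t];            // serial chain of 5 adds
  return acc;
}
static float dot5_acc2(const float* x, const float* k) {
  const float a0 = x[0] * k[0] + x[2] * k[2] + x[4] * k[4];  // first chain (taps 0, 2, 4)
  const float a1 = x[1] * k[1] + x[3] * k[3];                // second chain (taps 1, 3)
  return a0 + a1;                                            // single final combine
}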
| 9,351 | 28.225 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l4c4s4r-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_5f5m5l4c4s4r__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 24;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
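      // Each middle pass reloads the running sums from `buffer`, folds in 5
      // more taps, and stores the sums back in place.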
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
w += 20;
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vk3x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
v128_t vk4x0123 = wasm_v128_load(w + 16);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0p0);
v128_t vacc0 = vacc0p0;
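        // Store the remaining 1-3 channels: a 64-bit lane store covers two
        // channels when bit 1 of c is set, then the upper half is shuffled
        // down so a 32-bit lane store can write an odd final channel.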
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,023 | 27.923077 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
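  // The _acc2 suffix means two partial-sum chains per channel group
  // (vacc...p0 and vacc...p1), so back-to-back multiply-accumulates are not
  // serialized on a single register; the chains are added together before
  // every store.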
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
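      // The buffer passes walk channels rounded up to a multiple of 4 so
      // that whole vectors are always loaded and stored; XNN_OOB_READS on
      // the function declares that reading slightly past the last valid
      // channel is permitted.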
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
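        // Store the remaining 1-3 channels: the low pair is written when
        // bit 1 of c is set, after which the high half is moved down so an
        // odd final channel is stored from lane 0.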
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,214 | 33.647436 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
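  // Single-accumulator counterpart of the _acc2 kernel above: every tap is
  // folded into one vacc...p0 chain via vmlaq_f32.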
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,306 | 32.940133 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
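  // This variant targets NEON with FMA: vfmaq_f32 requires an FMA-capable
  // NEON unit and performs a fused multiply-add (one rounding step per tap),
  // whereas the plain NEON kernels above use vmlaq_f32, which need not be
  // fused.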
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,217 | 33.653846 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
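  // Single-accumulator FMA variant: every tap folds into one vacc...p0
  // chain via vfmaq_f32.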
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
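        // Store the remaining 1-3 channels, 64 bits then 32 bits at a time.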
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,309 | 32.946785 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-sse-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
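  // Kernel layout (per the 5f5m5l8c4s4r name): a first pass over 5 taps, middle
  // passes of 5 taps each, and a last pass of up to 5 taps, with a channel tile
  // of 8 and a channel subtile/rounding of 4.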
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
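      // Channels are rounded up to a multiple of 4 so the scratch buffer and the
      // packed weights stay 4-aligned; the padded lanes are dropped in the last pass.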
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
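        // vacc0123p1/vacc4567p1 form a second, independent accumulation chain
        // (the _acc2 variant) that shortens the dependent-add latency chain;
        // the two chains are summed just before the store.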
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 48;
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
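    // Each middle pass reads the running sums back from `buffer`, folds in 5
    // more taps, and stores them again.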
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 40;
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 40;
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
        // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,536 | 30.863198 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c4s4r-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 48;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 24;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 40;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
w += 40;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
w += 20;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,772 | 30.420319 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c8s4r-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c8s4r__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
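        // Load a per-lane mask with the first c lanes set; mask_table is laid out
        // (in the params struct) as 7 all-ones words followed by zeros, so indexing
        // at [7 - c] selects exactly c active lanes for the masked loads below.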
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
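        // Store the final 1-7 channels progressively: 4, then 2, then 1 lane(s),
        // shifting the surviving lanes down after each partial store.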
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,854 | 32.305288 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c8s4r-minmax-avx.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c8s4r__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,243 | 31.782178 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c8s4r-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c8s4r__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
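  // This variant is identical in structure to the AVX kernel above but uses
  // FMA3 fused multiply-adds plus two accumulator chains (_acc2).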
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
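    // Only this final pass applies the min/max clamping before writing to 'output'.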
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
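        // Store the remaining 1-7 results in 4-, 2- and 1-element chunks.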
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13543 | 31.557692 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-5f5m5l8c8s4r-minmax-fma3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_5f5m5l8c8s4r__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
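  // Unlike the *_acc2 variant, this kernel chains every FMA through a single
  // accumulator rather than splitting the sum across two.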
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 5 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
w += 40;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12854 | 30.819307 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-minmax-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
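// The 6f6m7l1c1s1r name encodes the pass structure: 6 taps in the first pass,
// 6 taps per middle pass, up to 7 in the last pass, one channel per iteration.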
void xnn_f32_dwconv_minmax_ukernel_6f6m7l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
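  // _acc2: multiply-adds alternate between two accumulators (vacc0p0/vacc0p1)
  // to shorten the dependency chain; the pair is summed before each store.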
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7736 | 29.702381 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 7;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
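    // Input pointers for taps past the end of the kernel are expected to point
    // at the shared 'zero' buffer, so the fixed 7-input loop below stays safe
    // when fewer than 7 taps remain.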
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7539 | 29.650407 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-minmax-wasm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l1c1s1r__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
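        // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower directly to the
        // WebAssembly f32.max/f32.min instructions.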
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7754 | 29.77381 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l1c1s1r__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
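  // XNN_UNPREDICTABLE hints that the zero-pointer checks below have no strongly
  // biased outcome, discouraging the compiler from predicting one side.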
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 7;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7557 | 29.723577 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_6f6m7l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
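  // Non-minmax variant: results are stored unclamped, so no min/max bounds are
  // read from 'params'.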
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7556 | 29.471774 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l1c1s1r-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_6f6m7l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 7;
*b++ = vacc0p0;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
w += 6;
*b++ = vacc0p0;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
w += 7;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7359 | 29.413223 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
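  // vmlaq_f32 maps to NEON's separate multiply-accumulate (VMLA); the neonfma
  // variants of this kernel use fused vfmaq_f32 instead.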
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
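        // The "acc2" variant alternates taps between two accumulators so that
        // consecutive multiply-adds are independent, shortening the dependency
        // chain; the accumulators are summed once before the store.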
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
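    // Runs while more than 7 taps remain, since the last pass below absorbs up
    // to 7; each iteration consumes 6 of the remaining taps.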
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
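        // Reload the running partial sums from the scratch buffer, accumulate
        // 6 more taps, and write them back for the next pass.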
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
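        // Store the 1-3 remaining channels: the low 64-bit pair when bit 1 of c
        // is set, then a single lane when c is odd.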
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,048 | 30.659026 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,623 | 30.155425 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
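        // vfmaq_f32 is a fused multiply-add (one rounding step); the plain NEON
        // variant uses vmlaq_f32, which multiplies and adds with intermediate
        // rounding.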
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,051 | 30.667622 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,626 | 30.164223 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
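        // Packed weights and the scratch buffer are 16-byte aligned, so aligned
        // _mm_load_ps/_mm_store_ps are used for them; input rows may be
        // unaligned, hence _mm_loadu_ps.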
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
w += 28;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
w += 28;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
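        // Store the 1-3 remaining channels: the low two lanes (shifting the
        // high lanes down afterwards) when bit 1 of c is set, then one lane
        // when c is odd.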
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,283 | 28.773087 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l4c4s4r-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l4c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
w += 28;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
w += 24;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
w += 28;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,922 | 28.442049 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
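      // Channels are rounded up to a multiple of 4 so the 4-wide remainder
      // below always works on full vectors; the out-of-bounds accesses this
      // implies are allowed by XNN_OOB_READS and presumably covered by the
      // scratch buffer's padding.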
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
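    // The loop runs while more than 7 taps remain, since the last pass below
    // can absorb up to 7 inputs.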
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
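    // Reads the partial sums back from `buffer`, adds the remaining taps,
    // applies the [min, max] clamp, and stores the result; the channel loops
    // below handle 8-wide, 4-wide, and sub-4 remainders in turn.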
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
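        // Store the 1-3 remaining channels: a 64-bit store for a pair of
        // lanes, then a single-lane store for an odd channel.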
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19426 | 34.193841 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
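  // Unlike the _acc2 variant above, this kernel keeps a single accumulator
  // chain per vector: every tap is a vmlaq_f32 into the same vaccNNNNp0
  // register, so no merge step is needed.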
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
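      // Weights are expected to be pre-packed so that `w` advances
      // monotonically: for each 8-channel group the first pass reads 8 bias
      // values followed by 8 values per tap, and later passes continue from
      // wherever `w` stopped.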
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 18518 | 33.614953 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
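  // Same structure as the neon_acc2 variant, but each multiply-accumulate uses
  // vfmaq_f32 (a fused multiply-add with a single rounding) instead of
  // vmlaq_f32 (a multiply followed by an add, rounding twice).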
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19429 | 34.199275 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
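  // Fused-multiply-add variant with a single accumulator chain: vfmaq_f32 into
  // vaccNNNNp0 for every tap, with no second accumulator to merge.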
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
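        // c is 1-3 here: write a pair of lanes first if c >= 2, then the
        // final odd lane.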
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 18,521 | 33.620561 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-sse-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
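  // The 6f6m7l8c4s4r suffix matches the structure below: a 6-tap first pass,
  // 6-tap middle passes, and a 7-tap last pass, with an 8-channel tile,
  // a 4-channel subtile, and channels rounded up to a multiple of 4.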
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
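    // Multipass scheme: the first pass initializes the scratch buffer with
    // bias + 6 taps of partial sums, each middle pass accumulates 6 more taps
    // into the buffer, and the last pass adds the remaining taps, clamps, and
    // writes the output row.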
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
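      // Channels are rounded up to a multiple of 4 so the scratch buffer is
      // always accessed in whole 4-float vectors; the XNN_OOB_READS
      // annotation covers the resulting overread past the last channel.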
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
w += 56;
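        // First-pass weights are packed per 8-channel tile as 8 biases
        // followed by 6 taps x 8 channels = 48 weights, hence the 56-float
        // advance.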
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
w += 28;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
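    // Middle passes reload partial sums from the scratch buffer instead of
    // the bias, so each iteration consumes only 6 taps x 8 channels = 48
    // weights per tile.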
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 40);
const __m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
w += 48;
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 7 inputs.
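    // Only the last pass applies the min/max clamp; all earlier passes keep
    // raw partial sums in the scratch buffer.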
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
__m128 vk5x0123 = _mm_load_ps(w + 40);
__m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
__m128 vk6x0123 = _mm_load_ps(w + 48);
__m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
w += 56;
        // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
w += 28;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
        // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,862 | 31.509002 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c4s4r-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 6 inputs.
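    // Note: unlike the _acc2 variant above, this kernel accumulates every
    // tap into a single chain per register; the _acc2 version splits the
    // work across two accumulators and merges them at the end, which
    // shortens the floating-point dependency chain at the cost of one extra
    // add.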
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
w += 56;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
w += 28;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 40);
const __m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
w += 48;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
w += 24;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
__m128 vk5x0123 = _mm_load_ps(w + 40);
__m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
__m128 vk6x0123 = _mm_load_ps(w + 48);
__m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
w += 56;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
w += 28;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,098 | 31.153199 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c8s4r-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c8s4r__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 56;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
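        // Indexing the mask table at 7 - c yields a mask whose first c lanes
        // are set, so each maskload touches only the c valid channels.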
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 56;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
w += 56;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
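        // Write the 1-7 remaining outputs 4, 2, then 1 at a time, shifting
        // the surviving lanes down after each partial store.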
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,362 | 33.089583 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-6f6m7l8c8s4r-minmax-avx.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_6f6m7l8c8s4r__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 56;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 56;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 7 inputs.
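// Only this final pass applies the min/max clamp and writes to `output`;
// the earlier passes exchange unclamped partial sums through `buffer`.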
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
w += 56;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
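// Store the 1-7 remainder channels with progressively narrower stores
// (4 floats, then 2, then 1), selected by the bits of c.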
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,751 | 32.65812 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-7f6m6l16c8s4r-minmax-fma3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_7f6m6l16c8s4r__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 7);
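// Pass structure (inferred from the 7f6m6l16c8s4r name and the loops
// below): a first pass over 7 input rows, middle passes over 6 rows each,
// and a last pass over up to 6 rows, with channels processed in tiles of
// 16 (8-channel subtile) and rounded up to a multiple of 4.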
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
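// For reference, a hedged scalar sketch of what this kernel computes per
// output pixel and channel (not generated code):
//   float acc = bias[ch];
//   for (size_t k = 0; k < kernel_size; k++) {
//     acc += input_row[k][ch] * weight[k][ch];
//   }
//   output[ch] = min(max(acc, params.min), params.max);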
do {
const float* w = weights;
// First pass to process 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
input += 7;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
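// Channels are rounded up to a multiple of 4 so that every pass walks
// `buffer` with the same padded channel count, presumably matching how the
// weights were packed for this kernel.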
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
i6 += 16;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi6x89ABCDEF, vk6x89ABCDEF, vacc89ABCDEFp0);
w += 128;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 6 inputs in each iteration.
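// Unlike the first pass, the weight block here holds only the 6 per-tap
// vectors (no bias); the running sums come from `buffer` instead.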
for (size_t ks = kernel_size - 7; ks > 6; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 32);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 48);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 64);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 80);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp0);
w += 96;
_mm256_store_ps(b, vacc01234567p0);
_mm256_store_ps(b + 8, vacc89ABCDEFp0);
b += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(b + 8);
b += 16;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
__m256 vk0x01234567 = _mm256_load_ps(w);
__m256 vk0x89ABCDEF = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
__m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vk1x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
__m256 vk2x01234567 = _mm256_load_ps(w + 32);
__m256 vk2x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
__m256 vk3x01234567 = _mm256_load_ps(w + 48);
__m256 vk3x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
__m256 vk4x01234567 = _mm256_load_ps(w + 64);
__m256 vk4x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
__m256 vk5x01234567 = _mm256_load_ps(w + 80);
__m256 vk5x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp0);
w += 96;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,480 | 34.582849 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-7f6m6l8c8s4r-minmax-fma3-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_7f6m6l8c8s4r__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 7);
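// The _acc2 suffix denotes two parallel accumulators: even-numbered taps
// land in vacc...p0 and odd-numbered taps in vacc...p1, which are summed
// once per tile. This shortens the FMA dependency chain at the cost of one
// extra add.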
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
input += 7;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
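// The second accumulator starts as a plain multiply: the bias already sits
// in vacc01234567p0 and must not be counted twice.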
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 7; ks > 6; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
w += 48;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,959 | 32.25 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-7f6m6l8c8s4r-minmax-fma3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_7f6m6l8c8s4r__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 7);
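// Single-accumulator variant: same pass structure as the _acc2 kernel
// above, but every tap feeds one FMA chain on vacc01234567p0.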
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 7 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
input += 7;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
w += 64;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 7; ks > 6; ks -= 6) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 6 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
w += 48;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,270 | 31.630342 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-minmax-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
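// Scalar fallback: one channel per loop iteration. math_muladd_f32 is
// assumed to map to a fused multiply-add where the target provides one,
// and to a separate multiply and add otherwise.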
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
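// What the multipass kernel above computes, written out as a plain reference:
// per output pixel and channel, bias plus a kernel_size-tap dot product,
// clamped to [vmin, vmax], with the taps split across a first pass of 8, some
// number of middle passes of 8, and a last pass of up to 9. The sketch below
// is hypothetical (not XNNPACK API) and uses a simple tap-major weight layout
// rather than the pass-blocked packing the real kernel consumes;
// math_muladd_f32 is approximated by fmaf.
#include <math.h>
#include <stddef.h>

static void dwconv_minmax_ref(
    size_t channels, size_t kernel_size,
    const float** input,   // kernel_size pointers, each to channels floats
    const float* bias,     // channels floats
    const float* kernel,   // kernel_size x channels, tap-major
    float* output, float vmin, float vmax) {
  for (size_t c = 0; c < channels; c++) {
    float vacc = bias[c];
    for (size_t k = 0; k < kernel_size; k++) {
      vacc = fmaf(input[k][c], kernel[k * channels + c], vacc);
    }
    vacc = fmaxf(vacc, vmin);
    output[c] = fminf(vacc, vmax);
  }
}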
| 9,470 | 30.57 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 9;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
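// The only difference between this kernel and the _acc2 variant above is the
// accumulation style. A small sketch of the two (hypothetical helpers, with
// fmaf standing in for math_muladd_f32): the _acc2 form keeps two independent
// chains and merges them once at the end, halving the length of the
// floating-point dependency chain through the taps.
#include <math.h>

static float dot8_acc1(const float* x, const float* w, float bias) {
  float vacc = bias;                 // single chain: each fmaf waits on the last
  for (int k = 0; k < 8; k++) {
    vacc = fmaf(x[k], w[k], vacc);
  }
  return vacc;
}

static float dot8_acc2(const float* x, const float* w, float bias) {
  float vacc0 = bias;                // even taps plus bias, as in vacc0p0
  float vacc1 = 0.0f;                // odd taps, as in vacc0p1
  for (int k = 0; k < 8; k += 2) {
    vacc0 = fmaf(x[k], w[k], vacc0);
    vacc1 = fmaf(x[k + 1], w[k + 1], vacc1);
  }
  return vacc0 + vacc1;              // add up all accumulators at the end
}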
| 9,273 | 30.544218 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-minmax-wasm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l1c1s1r__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,488 | 30.63 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l1c1s1r__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 9;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
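// The wasm variants on this page differ from the plain scalar ones only in
// the clamp: __builtin_wasm_max_f32 and __builtin_wasm_min_f32 lower to the
// single wasm f32.max / f32.min instructions. A hedged sketch of the same
// clamp with a portable fallback (the helper name is hypothetical):
static inline float clamp_f32(float v, float vmin, float vmax) {
#if defined(__wasm__)
  v = __builtin_wasm_max_f32(v, vmin);
  return __builtin_wasm_min_f32(v, vmax);
#else
  v = v < vmin ? vmin : v;   // portable approximation for illustration
  return v > vmax ? vmax : v;
#endif
}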
| 9,291 | 30.605442 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_9p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
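// A 9p kernel is typically driven by a 3x3 depthwise convolution: the caller
// passes, per output pixel, nine pointers consumed as i0..i8 above. A hedged
// sketch of filling that indirection array for one output pixel of a packed
// HWC tensor (hypothetical helper; unit stride and no padding assumed -- real
// setups go through XNNPACK's indirection-buffer initialization instead):
#include <stddef.h>

static void fill_indirection_3x3(
    const float** ind,       // 9 entries, consumed as i0..i8 above
    const float* input,      // H x W x C, channel-contiguous
    size_t width, size_t channels,
    size_t oy, size_t ox) {  // output pixel == top-left corner of the window
  for (size_t ky = 0; ky < 3; ky++) {
    for (size_t kx = 0; kx < 3; kx++) {
      ind[ky * 3 + kx] = input + ((oy + ky) * width + (ox + kx)) * channels;
    }
  }
}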
| 3,554 | 26.55814 | 76 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_8f8m9l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,290 | 30.388514 | 76 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l1c1s1r-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_8f8m9l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 9;
*b++ = vacc0p0;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
w += 8;
*b++ = vacc0p0;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[3];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[4];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[5];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[6];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[7];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[8];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 9;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
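// The pointer arithmetic above fixes the packed-weight layout for these
// 8f8m9l kernels: per channel, the first pass consumes 9 floats (bias plus 8
// taps), each middle pass consumes 8 taps, and the last pass consumes 9 taps
// (input pointers past kernel_size are aimed at the zero buffer). A
// hypothetical helper that derives the total packed-weight count from the
// same loop bounds; kernel_size must be > 8, as asserted by the kernels.
#include <stddef.h>

static size_t dwconv_8f8m9l_weights_count(size_t channels, size_t kernel_size) {
  size_t middle_passes = 0;
  for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
    middle_passes += 1;
  }
  return channels * (9 + 8 * middle_passes + 9);
}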
| 9,093 | 30.358621 | 76 | c |