repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
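// Note (added commentary, not part of the generated file): the suffix
// 8f8m9l4c4s4r appears to encode the multipass tiling -- a first pass over
// 8 kernel taps, middle passes over 8 taps each, and a last pass over up to
// 9 taps, with a channel tile of 4 (c4); the trailing s4r likely describes
// the channel subtile and rounding used when packing the weights. Partial
// sums are staged in `buffer` between passes, and `acc2` indicates that two
// accumulators are used per channel tile.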
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
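// Added note: the loop below keeps two partial sums (vacc0123p0 for the
// bias and even taps, vacc0123p1 for the odd taps) and adds them together
// at the end; splitting the dependent multiply-add chain this way hides
// instruction latency and is what the `acc2` suffix refers to.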
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
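// Added note: `ks` starts at kernel_size - 8 (the first pass already
// consumed 8 taps) and each middle-pass iteration consumes 8 more; the loop
// exits once at most 9 taps remain, which the last pass below handles
// (absent taps are expected to point at the `zero` row).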
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
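// Added note: the channel remainder still loads full 4-wide vectors (the
// XNN_OOB_READS annotation on the kernel signals that reading past the end
// of the arrays is permitted); only the final 1-3 lanes are stored below.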
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,514 | 31.25537 | 89 | c |
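All six kernels in this group share one calling convention, so a short caller sketch helps make the parameters concrete. Everything below is an assumption drawn from the signature alone (the buffer sizing, the params initialization, and the indirection/stride setup); it is not XNNPACK's actual operator code, which builds these inputs through dedicated packing and init routines.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack/dwconv.h>

// Hypothetical driver -- illustrative only; names and sizing are assumptions.
static void run_dwconv_rows(
    size_t channels, size_t output_width, size_t kernel_size,
    const float** indirection,    // per-pixel, per-tap input row pointers
    intptr_t input_stride,        // byte advance of `indirection` between pixels,
                                  // net of the pointers the kernel already consumed
    const float* packed_weights,  // bias + per-tap weights, packed per 4 channels
    const float* zero_row,        // shared zero-padding row
    float* output)
{
  // Scratch for partial sums carried between passes: one float per channel,
  // rounded up to the channel tile of 4 (assumed from the c4 suffix).
  float* buffer = malloc(((channels + 3) & ~(size_t) 3) * sizeof(float));
  union xnn_f32_minmax_params params;
  params.scalar.min = 0.0f;  // e.g. ReLU6-style output clamping
  params.scalar.max = 6.0f;
  xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__neon_acc2(
      channels, output_width, indirection, packed_weights, output,
      input_stride, /*output_increment=*/ 0, /*input_offset=*/ 0,
      zero_row, kernel_size, buffer, &params);
  free(buffer);
}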
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
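// Note (added commentary, not part of the generated file): this variant is
// identical to the acc2 kernel above except that it keeps a single
// accumulator chain per channel tile instead of two interleaved ones.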
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,089 | 30.849148 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
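// Note (added commentary, not part of the generated file): same structure as
// the plain NEON acc2 kernel above, but the multiply-accumulates use
// vfmaq_f32 (fused multiply-add, single rounding) instead of vmlaq_f32, so
// results can differ from the non-FMA variant in the last bit.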
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,517 | 31.26253 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
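// Note (added commentary, not part of the generated file): NEON FMA variant
// with a single accumulator chain; see the neonfma acc2 kernel above for the
// two-accumulator version.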
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,092 | 30.856448 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-sse-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
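// Note (added commentary, not part of the generated file): SSE has no fused
// multiply-add, so each tap is a separate _mm_mul_ps/_mm_add_ps pair. The
// packed weights and the scratch buffer use aligned accesses
// (_mm_load_ps/_mm_store_ps) while the input rows use unaligned loads
// (_mm_loadu_ps); unlike the NEON variants, `w` is advanced once per channel
// tile (w += 36 in the first and last passes, w += 32 in the middle passes)
// rather than after every load.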
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
w += 36;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
w += 32;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 36;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,837 | 29.413187 | 89 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l4c4s4r-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l4c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
w += 36;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = 0;
for (; c < channels; c += 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
w += 32;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 36;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
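// Handle the last 1-3 channels: a full vector is computed (XNN_OOB_READS permits the over-read) and only the valid lanes are stored.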
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,476 | 29.149888 | 89 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
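// Process the final group of 4 channels (the channel count was rounded up to a multiple of 4).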
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
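// Process the final group of 4 channels (the channel count was rounded up to a multiple of 4).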
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
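// Process the remaining group of 4 channels, if any.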
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
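// Handle the last 1-3 channels: full vectors are loaded (XNN_OOB_READS) and only the valid lanes are stored.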
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,090 | 34.849702 | 89 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
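// Process the final group of 4 channels (the channel count was rounded up to a multiple of 4).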
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
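// Process the remaining group of 4 channels, if any.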
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
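// Handle the last 1-3 channels: full vectors are loaded (XNN_OOB_READS) and only the valid lanes are stored.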
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,182 | 34.393893 | 89 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
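// Process the final group of 4 channels (the channel count was rounded up to a multiple of 4).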
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
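    // With 8 taps consumed by the first pass, this loop runs while more than
    // 9 taps remain, leaving up to 9 for the last pass: e.g. kernel_size = 25
    // splits into passes of 8 + 8 + 9. Each iteration reloads the partial
    // sums from the scratch buffer and folds in 8 more taps.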
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
        // Add up all accumulators to vacc01234567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
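      // Only this last pass applies the min/max clamp; earlier passes keep
      // unclamped partial sums in the scratch buffer.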
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
        // Add up all accumulators to vacc01234567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
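        // Partial store of the final 1-3 channels: write a 2-lane chunk,
        // shift the high half down, then store a single lane if needed.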
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
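/*
 * Illustrative scalar model of the multipass structure above -- a sketch,
 * not part of the generated kernel. It collapses the first/middle/last
 * passes into one tap loop and assumes a flat, hypothetical k-major weight
 * layout (kernel[k * channels + c]), unlike the pass-blocked, channel-tiled
 * layout the vector kernel actually consumes.
 */
static void xnn_dwconv_scalar_reference_sketch(
    size_t channels,
    size_t kernel_size,
    const float** input,
    const float* bias,
    const float* kernel,
    float* buffer,
    float* output,
    float output_min,
    float output_max)
{
  // The first pass seeds the scratch buffer with the bias.
  for (size_t c = 0; c < channels; c++) {
    buffer[c] = bias[c];
  }
  // Each pass of the real kernel folds a block of taps into buffer[];
  // here all kernel_size taps are folded in a single loop.
  for (size_t k = 0; k < kernel_size; k++) {
    for (size_t c = 0; c < channels; c++) {
      buffer[c] += input[k][c] * kernel[k * channels + c];
    }
  }
  // Only the final pass clamps and writes the output, as above.
  for (size_t c = 0; c < channels; c++) {
    float vout = buffer[c];
    vout = vout < output_min ? output_min : vout;
    vout = vout > output_max ? output_max : vout;
    output[c] = vout;
  }
}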
| 24,093 | 34.854167 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
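/*
 * Minimal sketch (assumes a target with NEON FMA, e.g. AArch64): one tap of
 * the per-channel accumulation as issued by this neonfma variant.
 * vfmaq_f32 fuses multiply and add with a single rounding step; the plain
 * NEON sibling generated from the same template uses vmlaq_f32, which
 * rounds the product before accumulating.
 */
static inline float32x4_t xnn_dwconv_fma_tap_sketch(
    float32x4_t vacc, float32x4_t vi, float32x4_t vk)
{
  return vfmaq_f32(vacc, vi, vk);
}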
| 23,185 | 34.398473 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
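  // params->sse.min/max hold the scalar bounds pre-broadcast to 4 lanes and
  // 16-byte aligned (assumed from the packed params layout), which is what
  // makes the aligned _mm_load_ps safe here.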
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
const __m128 vk6x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
const __m128 vk7x4567 = _mm_load_ps(w + 68);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi7x4567, vk7x4567));
w += 72;
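        // Unlike the NEON variants' post-incremented loads, this SSE body
        // addresses weights at fixed offsets from w and bumps w once per tile.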
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
w += 36;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 40);
const __m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 48);
const __m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 56);
const __m128 vk7x4567 = _mm_load_ps(w + 60);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi7x4567, vk7x4567));
w += 64;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
w += 32;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
__m128 vk5x0123 = _mm_load_ps(w + 40);
__m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
__m128 vk6x0123 = _mm_load_ps(w + 48);
__m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
__m128 vk7x0123 = _mm_load_ps(w + 56);
__m128 vk7x4567 = _mm_load_ps(w + 60);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi7x4567, vk7x4567));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vi8x4567 = _mm_loadu_ps(i8 + 4);
i8 += 8;
__m128 vk8x0123 = _mm_load_ps(w + 64);
__m128 vk8x4567 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi8x4567, vk8x4567));
w += 72;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 36;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
        // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
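        // Partial store of the final 1-3 channels: _mm_storel_pi writes the
        // low two lanes, _mm_movehl_ps shifts the high pair down, and
        // _mm_store_ss writes a single lane.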
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
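/*
 * Caller-side sketch (an assumption about the scratch-buffer contract, not a
 * documented API): per output pixel, the buffer must hold one partial sum
 * per padded channel, matching the c loop bounds used above.
 */
static size_t xnn_dwconv_buffer_elements_sketch(size_t channels) {
  return round_up_po2(channels, 4);  // hypothetical helper for illustration
}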
| 24,692 | 32.234186 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c4s4r-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c4s4r__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
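      // channels is rounded up to a multiple of 4: the scratch buffer is
      // padded accordingly and XNN_OOB_READS allows reading past the last
      // valid channel, so no scalar tail is needed in this pass.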
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
const __m128 vk6x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
const __m128 vk7x4567 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi7x4567, vk7x4567));
w += 72;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
w += 36;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Middle pass to process 8 inputs in each iteration.
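    // After the first pass, ks = kernel_size - 8 taps remain. Each middle pass
    // consumes 8 taps while more than 9 remain, so the last pass is left with
    // 2-9 taps. Example: kernel_size = 25 -> first pass 8, one middle pass 8,
    // last pass 9.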
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w);
const __m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
const __m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 16);
const __m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 24);
const __m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 32);
const __m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 40);
const __m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 48);
const __m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 56);
const __m128 vk7x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi7x4567, vk7x4567));
w += 64;
_mm_store_ps(b, vacc0123p0);
_mm_store_ps(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
w += 32;
_mm_store_ps(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 9 inputs.
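    // It adds the final 2-9 taps to the buffered partial sums, applies the
    // [vmin, vmax] clamp, and writes the result to the output instead of back
    // to the buffer.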
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(b);
__m128 vacc4567p0 = _mm_load_ps(b + 4);
b += 8;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
__m128 vk0x0123 = _mm_load_ps(w);
__m128 vk0x4567 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
__m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vk1x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
__m128 vk2x0123 = _mm_load_ps(w + 16);
__m128 vk2x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
__m128 vk3x0123 = _mm_load_ps(w + 24);
__m128 vk3x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vk4x0123 = _mm_load_ps(w + 32);
__m128 vk4x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
__m128 vk5x0123 = _mm_load_ps(w + 40);
__m128 vk5x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
__m128 vk6x0123 = _mm_load_ps(w + 48);
__m128 vk6x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
__m128 vk7x0123 = _mm_load_ps(w + 56);
__m128 vk7x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi7x4567, vk7x4567));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vi8x4567 = _mm_loadu_ps(i8 + 4);
i8 += 8;
__m128 vk8x0123 = _mm_load_ps(w + 64);
__m128 vk8x4567 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi8x4567, vk8x4567));
w += 72;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(b);
b += 4;
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 36;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(b);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
__m128 vk0x0123 = _mm_load_ps(w);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
__m128 vk1x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
__m128 vk2x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
__m128 vk3x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
__m128 vk4x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
__m128 vk5x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
__m128 vk6x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
__m128 vk7x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
__m128 vk8x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23928 | 31.960055 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c8s4r-minmax-avx-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c8s4r__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
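  // _acc2 variant: multiply-add results alternate between two accumulators
  // (vacc...p0 and vacc...p1) to shorten the dependency chain on the adds;
  // the two partial sums are combined once per group before storing.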
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 72;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
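        // mask_table holds 7 all-ones 32-bit words followed by 7 zeros, so
        // loading 8 words starting at index 7-c yields a mask whose first c
        // lanes are active; _mm256_maskload_ps then reads exactly c floats.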
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 72;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 64;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 64;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
__m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
__m256 vk8x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 72;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
__m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
__m256 vk8x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
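        // Store the 1-7 remaining outputs 4/2/1 elements at a time, walking
        // down the bits of c: the low 128-bit half goes first, then the high
        // half is swapped in once 4 lanes have been written.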
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20136 | 33.960069 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-8f8m9l8c8s4r-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_8f8m9l8c8s4r__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
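  // Same multipass structure as the _acc2 variant above, but with a single
  // accumulator chain per channel group (no vacc...p1).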
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* w = weights;
// First pass to process 8 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 72;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 72;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 64;
_mm256_store_ps(b, vacc01234567p0);
b += 8;
}
if (c != 0) {
assert(c >= 1);
assert(c <= 7);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(b);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
w += 64;
_mm256_store_ps(b, vacc01234567p0);
}
}
// Last pass to process up to 9 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(b);
b += 8;
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
__m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
__m256 vk8x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 72;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
__m256 vacc01234567p0 = _mm256_load_ps(b);
        const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
__m256 vk0x01234567 = _mm256_load_ps(w);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
__m256 vk1x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
__m256 vk2x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
__m256 vk3x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
__m256 vk4x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
__m256 vk5x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
__m256 vk6x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
__m256 vk7x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
__m256 vk8x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19525 | 33.620567 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-avx-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
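  // Unipass kernel (9p16c): all 9 kernel taps are applied in a single pass,
  // so unlike the multipass kernels above there is no kernel_size argument
  // and no scratch buffer; 16 channels are processed per iteration.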
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
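    // Weight layout per 16-channel group: 16 biases followed by 9 taps x 16
    // channels = 160 floats, matching the w += 160 below.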
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi5x89ABCDEF, vk5x89ABCDEF));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
i6 += 16;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi6x89ABCDEF, vk6x89ABCDEF));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
i7 += 16;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi7x89ABCDEF, vk7x89ABCDEF));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
i8 += 16;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi8x89ABCDEF, vk8x89ABCDEF));
w += 160;
      // Add up all accumulators to vacc01234567p0 and vacc89ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
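    // For the last 1..7 channels, mask_table[7 - c] selects a mask covering the
    // first c lanes, so the masked input loads stay in bounds. The weight loads
    // remain full-width; reading past the end of the packed weights is
    // tolerated, as the XNN_OOB_READS annotation on this kernel indicates.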
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,525 | 37.900621 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi5x89ABCDEF, vk5x89ABCDEF));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
i6 += 16;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi6x89ABCDEF, vk6x89ABCDEF));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
i7 += 16;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi7x89ABCDEF, vk7x89ABCDEF));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
i8 += 16;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi8x89ABCDEF, vk8x89ABCDEF));
w += 160;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
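    // An 8-channel remainder reuses the 16-channel weight offsets and advances
    // w by only 8, so a following masked iteration picks up the second half of
    // the same 160-float weight group.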
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
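      // Write the c (1..7) results with a 4/2/1-element store cascade,
      // shifting the remaining lanes down after each partial store.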
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,175 | 37.653968 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
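    // AVX-512 covers all 16 channels with a single zmm register per tap, using
    // one _mm512_fmadd_ps each; taps alternate between the p0 and p1
    // accumulators to shorten the dependency chain.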
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 160;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for the c valid 32-bit elements: (1 << c) - 1 sets the low c lanes.
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,593 | 39.729858 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
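    // Single-accumulator variant: all 9 taps feed one chain of
    // _mm512_fmadd_ps operations over a full 16-channel zmm vector.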
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 160;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for the c valid 32-bit elements: (1 << c) - 1 sets the low c lanes.
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
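      // The masked store writes exactly the c valid outputs; no scalar
      // store cascade is needed with AVX-512 masking.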
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,320 | 39.198068 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
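    // FMA3 fuses each multiply-add into one _mm256_fmadd_ps. As in the other
    // acc2 variants, tap 1 seeds a second accumulator (p1) with a plain
    // multiply, and p0/p1 are summed once before the min/max clamp.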
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
i6 += 16;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi6x89ABCDEF, vk6x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
i7 += 16;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi7x89ABCDEF, vk7x89ABCDEF, vacc89ABCDEFp1);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
i8 += 16;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi8x89ABCDEF, vk8x89ABCDEF, vacc89ABCDEFp0);
w += 160;
      // Add up all accumulators to vacc01234567p0 and vacc89ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,110 | 36.611801 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-fma3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
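    // Single-accumulator FMA3 variant: each tap collapses the separate
    // multiply and add of plain AVX into one _mm256_fmadd_ps.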
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
i4 += 16;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
i5 += 16;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
i6 += 16;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi6x89ABCDEF, vk6x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
i7 += 16;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi7x89ABCDEF, vk7x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
i8 += 16;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi8x89ABCDEF, vk8x89ABCDEF, vacc89ABCDEFp0);
w += 160;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
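    // Remainder of 1..7 channels: masked input loads plus full-width weight
    // loads, followed by the 4/2/1 partial-store cascade below.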
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,708 | 36.171429 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
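    // NEON processes the 16 channels as four 128-bit slices (0123/4567/89AB/
    // CDEF); each tap is a vmlaq_f32 multiply-accumulate, with two accumulator
    // banks (p0/p1) per slice to relax the dependency chain.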
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4xCDEF = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk4x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk4xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi4x89AB, vk4x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi4xCDEF, vk4xCDEF);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5xCDEF = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk5x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk5xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi5x89AB, vk5x89AB);
vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi5xCDEF, vk5xCDEF);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6xCDEF = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk6x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk6xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi6x89AB, vk6x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi6xCDEF, vk6xCDEF);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7xCDEF = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk7x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk7xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi7x89AB, vk7x89AB);
vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi7xCDEF, vk7xCDEF);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x89AB = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8xCDEF = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk8x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk8xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi8x89AB, vk8x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi8xCDEF, vk8xCDEF);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
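    // Remainder loop: process 4 channels at a time. The weights stay packed
    // for a 16-channel tile, so w advances by only 4 while each tap is loaded
    // at a fixed offset into the tile (w + 12, w + 28, ...).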
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 76);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 92);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 108);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 124);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 140);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
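    // Final 1-3 channels: the 4-wide loads may read past the valid data
    // (permitted by XNN_OOB_READS); stores are decomposed into 2- and 1-lane
    // writes.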
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 80);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 96);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 112);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 128);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 144);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-neon.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
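    // Same structure as the _acc2 variant above, but with a single
    // accumulator per 4-channel group: every tap feeds vmlaq_f32 into p0
    // directly.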
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi3x89AB, vk3x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi3xCDEF, vk3xCDEF);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4xCDEF = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk4x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk4xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi4x89AB, vk4x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi4xCDEF, vk4xCDEF);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5xCDEF = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk5x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk5xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi5x89AB, vk5x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi5xCDEF, vk5xCDEF);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6xCDEF = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk6x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk6xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi6x89AB, vk6x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi6xCDEF, vk6xCDEF);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7xCDEF = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk7x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk7xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi7x89AB, vk7x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi7xCDEF, vk7xCDEF);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x89AB = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8xCDEF = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk8x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk8xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi8x89AB, vk8x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi8xCDEF, vk8xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 76);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 92);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 108);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 124);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 140);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 80);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 96);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 112);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 128);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 144);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-neonfma-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
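    // This variant uses vfmaq_f32 (fused multiply-add, ARMv8/NEONv2) instead
    // of vmlaq_f32, avoiding the intermediate rounding of a separate
    // multiply and add.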
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
vacc89ABp1 = vfmaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
vaccCDEFp1 = vfmaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4xCDEF = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk4x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk4xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi4x89AB, vk4x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi4xCDEF, vk4xCDEF);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5xCDEF = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk5x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk5xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
vacc89ABp1 = vfmaq_f32(vacc89ABp1, vi5x89AB, vk5x89AB);
vaccCDEFp1 = vfmaq_f32(vaccCDEFp1, vi5xCDEF, vk5xCDEF);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6xCDEF = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk6x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk6xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi6x89AB, vk6x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi6xCDEF, vk6xCDEF);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7xCDEF = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk7x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk7xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
vacc89ABp1 = vfmaq_f32(vacc89ABp1, vi7x89AB, vk7x89AB);
vaccCDEFp1 = vfmaq_f32(vaccCDEFp1, vi7xCDEF, vk7xCDEF);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x89AB = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8xCDEF = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk8x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk8xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi8x89AB, vk8x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi8xCDEF, vk8xCDEF);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 76);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 92);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 108);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 124);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 140);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 80);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 96);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 112);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 128);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 144);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p16c-minmax-neonfma.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p16c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
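    // Single-accumulator FMA variant: identical control flow to the neon
    // kernel above, with vmlaq_f32 replaced by fused vfmaq_f32.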
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi3x89AB, vk3x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi3xCDEF, vk3xCDEF);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4xCDEF = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk4x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk4xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi4x89AB, vk4x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi4xCDEF, vk4xCDEF);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5xCDEF = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk5x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk5xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi5x89AB, vk5x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi5xCDEF, vk5xCDEF);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6xCDEF = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk6x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk6xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi6x89AB, vk6x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi6xCDEF, vk6xCDEF);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7xCDEF = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk7x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk7xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi7x89AB, vk7x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi7xCDEF, vk7xCDEF);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x89AB = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8xCDEF = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk8x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk8xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi8x89AB, vk8x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi8xCDEF, vk8xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 76);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 92);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 108);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 124);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 140);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 80);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 96);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 112);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 128);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 144);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-minmax-scalar-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
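    // Scalar fallback: one channel per iteration, alternating the 9 taps
    // between two accumulators to shorten the dependency chain.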
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
vacc0p0 += vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-minmax-wasm-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p1c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
vacc0p0 += vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-minmax-wasm.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p1c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
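    // Single-accumulator variant: all nine multiply-adds chain through
    // vacc0p0, reading the same 10-float [bias, k0..k8] group per channel.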
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p1c-scalar-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_9p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
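    // math_muladd_f32(a, b, c) computes a*b + c; on targets with hardware FMA
    // it is expected to lower to a fused multiply-add.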
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
w += 10;
vacc0p0 += vacc0p1;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-minmax-scalar-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
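    // Channel tile of 2: weights are interleaved as [bias(c0), bias(c1),
    // k0(c0), k0(c1), ..., k8(c0), k8(c1)], so the main loop consumes
    // 20 floats per iteration.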
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
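    // Remainder channel: w still points into an interleaved 20-float group,
    // so after the bias is consumed with *w++, tap j of the last channel is
    // read at the odd offset w[2*j + 1].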
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-minmax-scalar.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
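
// A minimal driver sketch (not part of the generated sources): it computes
// one output pixel of a 3x3 depthwise convolution over 2 channels with the
// kernel above. The weight packing ([bias(c0), bias(c1), k0(c0), k0(c1), ...,
// k8(c0), k8(c1)]) and the params field names are inferred from the kernel
// body; the includes assume XNNPACK's internal headers are on the include
// path.
#include <stddef.h>
#include <stdio.h>

#include <xnnpack/dwconv.h>

int main(void) {
  // Nine input rows (the 3x3 taps), two channels each, all set to 1.0f.
  float rows[9][2];
  const float* indirection[9];
  for (int i = 0; i < 9; i++) {
    rows[i][0] = rows[i][1] = 1.0f;
    indirection[i] = rows[i];
  }
  // Per-channel bias 0.5f followed by nine unit taps: the raw result is 9.5f.
  float weights[20];
  weights[0] = weights[1] = 0.5f;
  for (int i = 2; i < 20; i++) {
    weights[i] = 1.0f;
  }
  union xnn_f32_minmax_params params;
  params.scalar.min = 0.0f;
  params.scalar.max = 9.0f;  // Exercises the clamp: 9.5f saturates to 9.0f.
  float out[2];
  xnn_f32_dwconv_minmax_ukernel_9p2c__scalar(
      /*channels=*/2, /*output_width=*/1, indirection, weights, out,
      /*input_stride=*/0, /*output_increment=*/0, /*input_offset=*/0,
      /*zero=*/NULL, &params);
  printf("%.1f %.1f\n", out[0], out[1]);  // Expected output: 9.0 9.0
  return 0;
}
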

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-minmax-wasm-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p2c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
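      // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower to WebAssembly's
      // f32.max/f32.min instructions, giving a branch-free clamp.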
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-minmax-wasm.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_9p2c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
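    // Unlike the _acc2 sibling, this variant keeps a single accumulator per
    // channel, trading a longer multiply-add dependency chain for one fewer
    // add per channel at the end.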
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-scalar-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_9p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
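    // This variant applies no output clamping: the summed accumulators are
    // stored as-is and the xnn_f32_default_params argument goes unused.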
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p2c-scalar.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_9p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
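    // The nine row pointers above come from an indirection buffer; entries
    // that point outside the input image are set to `zero` and deliberately
    // skip the input_offset adjustment.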
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
w += 20;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p32c-minmax-avx512f-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_9p32c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
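    // Channel tile of 32: each packed weight group is 320 floats (a 32-float
    // bias block followed by nine 32-float tap blocks). Weight loads use the
    // aligned _mm512_load_ps; activation loads use the unaligned
    // _mm512_loadu_ps.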
for (; c >= 32; c -= 32) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
__m512 vaccGHIJKLMNOPQRSTUVp1 = _mm512_mul_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
const __m512 vi3xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
const __m512 vk3xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp1 = _mm512_fmadd_ps(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp1);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
const __m512 vi4xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 160);
const __m512 vk4xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
const __m512 vi5xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i5 + 16);
i5 += 32;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 192);
const __m512 vk5xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 208);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp1 = _mm512_fmadd_ps(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp1);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
const __m512 vi6xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i6 + 16);
i6 += 32;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 224);
const __m512 vk6xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
const __m512 vi7xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i7 + 16);
i7 += 32;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 256);
const __m512 vk7xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 272);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp1 = _mm512_fmadd_ps(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp1);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
const __m512 vi8xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i8 + 16);
i8 += 32;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 288);
const __m512 vk8xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
w += 320;
      // Add up all accumulators to vacc0123456789ABCDEFp0 and vaccGHIJKLMNOPQRSTUVp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_add_ps(vaccGHIJKLMNOPQRSTUVp0, vaccGHIJKLMNOPQRSTUVp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 192);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 256);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 16;
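      // Advancing w by only 16 keeps the tap offsets (w + 32, w + 64, ...)
      // inside the same 320-float weight group, so a second 16-channel pass
      // reads lanes 16-31 of each bias/tap block.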
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
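      // (UINT32_C(1) << c) - 1 sets the low c bits of the mask, e.g. c == 5 gives 0x001F.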
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 192);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 256);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}

// File: XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p32c-minmax-avx512f.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_9p32c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
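    // Same 320-float weight groups as the _acc2 variant, but every tap
    // accumulates into a single dependency chain per 16-lane half.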
for (; c >= 32; c -= 32) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
const __m512 vi3xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
const __m512 vk3xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
const __m512 vi4xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i4 + 16);
i4 += 32;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 160);
const __m512 vk4xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
const __m512 vi5xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i5 + 16);
i5 += 32;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 192);
const __m512 vk5xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 208);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
const __m512 vi6xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i6 + 16);
i6 += 32;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 224);
const __m512 vk6xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
const __m512 vi7xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i7 + 16);
i7 += 32;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 256);
const __m512 vk7xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 272);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
const __m512 vi8xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i8 + 16);
i8 += 32;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 288);
const __m512 vk8xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
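      // The load offsets above imply the weight packing: 32 bias floats
      // followed by 9 taps x 32 kernel floats, i.e. 320 floats per
      // 32-channel group, matching the stride below.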
w += 320;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; c >= 16; c -= 16) {
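      // This loop runs at most once (c < 32 on entry). Tap loads keep the
      // 32-lane stride of the packed weight group while w advances by only
      // the 16 channels actually consumed.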
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 192);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 256);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 16;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
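      // (UINT32_C(1) << c) - 1 sets the low c bits, so the masked loads and
      // the masked store below touch only the c remaining channels.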
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 192);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 256);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,570 | 43.641447 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
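      // acc2 variant: even taps accumulate into vacc0123p0, odd taps into
      // vacc0123p1, halving the accumulation dependency chain; the two
      // partial sums are added together after the last tap.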
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
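      // Remainder of 1..3 channels: the full 4-wide loads below may read past
      // the last channel, which the XNN_OOB_READS annotation permits; only
      // c lanes are stored.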
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,596 | 33.359375 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
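      // Single-accumulator variant: every tap chains through vacc0123p0 via
      // vmlaq_f32, which rounds the product before the add; the companion
      // neonfma kernels use fused vfmaq_f32 instead.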
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,389 | 32.989362 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
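      // neonfma acc2 variant: fused vfmaq_f32 multiply-adds alternate between
      // two accumulators that are summed once after tap 8.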
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,599 | 33.375 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
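      // Fused multiply-add (vfmaq_f32) into a single accumulator: no
      // intermediate rounding between the multiply and the add.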
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,392 | 33.005319 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
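      // SSE1 has no FMA, so each tap is an explicit _mm_mul_ps + _mm_add_ps.
      // Weights use aligned _mm_load_ps (the packed weight layout is
      // presumably 16-byte aligned) while inputs use unaligned _mm_loadu_ps.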
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,677 | 30.5 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 40;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
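      // Piecewise tail store: _mm_storel_pi writes lanes 0-1, _mm_movehl_ps
      // then moves lanes 2-3 down so _mm_store_ss can write a final odd lane.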
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,502 | 30.264423 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmrelaxedsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
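      // Relaxed-SIMD clamps: __builtin_wasm_relaxed_max/min_f32x4 leave the
      // NaN and signed-zero behavior implementation-defined, letting the
      // engine emit a single native min/max instruction.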
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,087 | 32.276995 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmrelaxedsimd-fma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
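      // __builtin_wasm_relaxed_madd_f32x4 may lower to either a fused or an
      // unfused multiply-add, trading bit-exact results for speed on hosts
      // with native FMA.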
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,139 | 32.521127 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 40;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,970 | 32.354067 | 89 | c |
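The _fma variant above differs from the plain wasmsimd kernels further down only in how it accumulates. A minimal side-by-side sketch of the two forms (helper names are ours; the relaxed builtin requires building with relaxed SIMD enabled):

#include <wasm_simd128.h>

// Relaxed form: the engine may fuse (one rounding) or split into
// mul + add (two roundings), so the last bit can differ across hosts.
static v128_t madd_relaxed(v128_t a, v128_t b, v128_t acc) {
  return __builtin_wasm_relaxed_madd_f32x4(a, b, acc);
}

// Strict form: always two roundings, bit-identical everywhere.
static v128_t madd_strict(v128_t a, v128_t b, v128_t acc) {
  return wasm_f32x4_add(wasm_f32x4_mul(a, b), acc);
}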
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,912 | 32.076555 | 89 | c |
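Every 9p4c kernel in this group computes the same per-group result; only the instruction selection differs. A scalar reference for one group of 4 channels, assuming the packed weight layout implied by the w, w + 4, ..., w + 36 loads (bias first, then the nine taps):

#include <math.h>

// i[t] points at the 4 channel values of input row t; w is one packed
// 40-float group: { bias[0..3], k0[0..3], ..., k8[0..3] }.
static void dwconv9_c4_ref(const float* i[9], const float w[40],
                           float out[4], float vmin, float vmax) {
  for (int c = 0; c < 4; c++) {
    float acc = w[c];                        // bias
    for (int t = 0; t < 9; t++) {
      acc += i[t][c] * w[4 + 4 * t + c];     // tap t, channel c
    }
    out[c] = fminf(fmaxf(acc, vmin), vmax);  // clamp
  }
}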
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,012 | 31.924883 | 89 | c |
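The _acc2 suffix above means the nine multiply-adds are split across two independent accumulators: vacc0123p0 carries the bias and the even taps, vacc0123p1 the odd taps, merged once before the clamp. The split halves the depth of the floating-point add chain, so an out-of-order core can overlap the two chains. The same idea in scalar form:

static float dot9_acc2(const float* x, const float* k) {
  float acc0 = x[0] * k[0];                            // chain 0: taps 0, 2, 4, 6, 8
  float acc1 = x[1] * k[1];                            // chain 1: taps 1, 3, 5, 7
  for (int t = 2; t < 9; t += 2) acc0 += x[t] * k[t];
  for (int t = 3; t < 9; t += 2) acc1 += x[t] * k[t];
  return acc0 + acc1;                                  // "Add up all accumulators"
}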
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,837 | 31.717703 | 89 | c |
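The _arm kernels above clamp with wasm_f32x4_max/min, whose symmetric NaN handling matches ARM's vector min/max in one instruction each; the _x86 variants that follow use wasm_f32x4_pmax/pmin, defined by a plain comparison select, which lower to single maxps/minps instructions on x86. Both families pass the clamp bound as the first operand, so they agree on every non-NaN input. A sketch of the two epilogues (helper names are ours):

#include <wasm_simd128.h>

static v128_t clamp_arm(v128_t v, v128_t vmin, v128_t vmax) {
  return wasm_f32x4_min(vmax, wasm_f32x4_max(vmin, v));
}

static v128_t clamp_x86(v128_t v, v128_t vmin, v128_t vmax) {
  return wasm_f32x4_pmin(vmax, wasm_f32x4_pmax(vmin, v));
}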
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,016 | 31.943662 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p4c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,841 | 31.736842 | 89 | c |
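All of these kernels walk the weights in the same packed order: per group of 4 channels, 4 bias values followed by the nine 4-wide taps, which is exactly why each tap load sits at w + 4 * (1 + t) and the pointer advances by 40. A packing sketch (function and argument names are ours):

static void pack_dwconv9_c4(const float bias[4],
                            const float k[9][4],   // [tap][channel]
                            float packed[40]) {
  for (int c = 0; c < 4; c++) {
    packed[c] = bias[c];
  }
  for (int t = 0; t < 9; t++) {
    for (int c = 0; c < 4; c++) {
      packed[4 + 4 * t + c] = k[t][c];
    }
  }
}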
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 40;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,619 | 31.45098 | 90 | c |
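The kernel above is the unclamped twin of the earlier minmax relaxed-FMA variant: identical loads and multiply-adds, but it takes xnn_f32_default_params and the epilogue stores the raw accumulator. The entire difference, isolated as two hypothetical helpers:

#include <wasm_simd128.h>

static v128_t epilogue_minmax(v128_t acc, v128_t vmin, v128_t vmax) {
  v128_t out = __builtin_wasm_relaxed_max_f32x4(vmin, acc);
  return __builtin_wasm_relaxed_min_f32x4(vmax, out);
}

static v128_t epilogue_plain(v128_t acc) {
  return acc;  // no clamp: the raw accumulator is stored
}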
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-wasmsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p4c__wasmsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,729 | 31.355769 | 90 | c |
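One detail every tail path here relies on: when fewer than 4 channels remain, the kernels still issue full 16-byte wasm_v128_load reads, and the XNN_OOB_READS annotation on each function marks that over-read as deliberate, so the caller must keep the trailing bytes readable. A hypothetical caller-side allocation that satisfies this contract (the helper and its rounding are our illustration, not XNNPACK's allocator):

#include <stdlib.h>

static float* alloc_padded_channels(size_t channels) {
  const size_t padded = (channels + 3) & ~(size_t) 3;  // round up to 4 floats
  return (float*) malloc(padded * sizeof(float));
}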
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p4c-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p4c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 40;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,554 | 31.132353 | 90 | c |
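The AVX kernel below widens the group to 8 channels and swaps the lane-wise tail stores for masked loads: the 1-7 leftover channels are read with _mm256_maskload_ps under a mask pulled from params->avx.mask_table[7 - c]. That indexing implies a table of seven -1 sentinels followed by zeros, so sliding the 8-lane window left by c yields exactly c active lanes. A self-contained sketch (table contents inferred from the indexing, not copied from XNNPACK headers):

#include <immintrin.h>
#include <stdint.h>

static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,
};

static __m256i remainder_mask(size_t c) {  // c in 1..7
  return _mm256_loadu_si256((const __m256i*) &mask_table[7 - c]);
}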
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
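// Each 8-channel group of packed weights holds 8 bias values followed by 9 taps x 8 channels, i.e. 80 floats (matching the w += 80 below).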
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
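// acc2 variant: odd-numbered taps accumulate into a second register to shorten the add dependency chain; the two partial sums are combined before clamping.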
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
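// Loading mask_table at element (7 - c) yields a mask with exactly the first c lanes set, so the masked loads below read only the valid channels.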
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
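// Write the 1-7 leftover channels in 4-, 2- and 1-element pieces, moving the surviving lanes into the low half after each store.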
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,864 | 34.427928 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
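// Advance the indirection buffer to the next output pixel's 9 input row pointers (input_stride is a byte offset).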
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,665 | 34.165138 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
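// Same schedule as the AVX kernel, but every multiply/add pair is fused into a single _mm256_fmadd_ps.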
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
w += 80;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,657 | 33.495495 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-fma3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,432 | 33.09633 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
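// Main 8-channel tile: the accumulator is split across two q-registers (channels 0-3 and 4-7), with bias and tap weights consumed 4 floats at a time.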
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
// Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
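// 4-channel subtile: the weights stay packed with the 8-channel stride, so after consuming half of the bias (w += 4) tap k is read from w + 8*k + 4.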
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
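// 1-3 channels left: full vectors are loaded (the kernel is declared XNN_OOB_READS, so reading past the valid channels is permitted) and only c lanes are stored.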
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,349 | 37.051471 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
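// Store two lanes at a time; after a pair is written, the high half is moved down so a final odd channel is taken from lane 0.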
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,983 | 36.675472 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
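// neonfma variant: vfmaq_f32 is a true fused multiply-add (single rounding), whereas the plain NEON kernel's vmlaq_f32 may compile to a separate multiply and add.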
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
// Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,352 | 37.0625 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
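    // Remainder of 4 channels: w advances only past the 4 bias values just
    // loaded (w += 4); each tap's weights are then read at fixed offsets that
    // step by 8, hopping over the unused half of every 8-wide weight row.
    // Since channels % 8 < 8 here, this loop runs at most once.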
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
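    // Final 1-3 channels: a full 4-lane vector is computed (reading past the
    // valid channels is tolerated per the XNN_OOB_READS annotation) and the
    // result is stored piecewise below.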
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,986 | 36.686792 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
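    // Rows that fall outside the input (padding) are passed as the shared
    // `zero` buffer; those pointers are deliberately left un-offset so every
    // tap can be computed unconditionally.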
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
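    // acc2 scheme: even taps accumulate into vacc*p0 and odd taps into
    // vacc*p1, giving two independent dependency chains (likely intended to
    // let consecutive _mm_add_ps operations overlap); the chains are merged
    // once per tile.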
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
const __m128 vk6x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
const __m128 vk7x4567 = _mm_load_ps(w + 68);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi7x4567, vk7x4567));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vi8x4567 = _mm_loadu_ps(i8 + 4);
i8 += 8;
const __m128 vk8x0123 = _mm_load_ps(w + 72);
const __m128 vk8x4567 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi8x4567, vk8x4567));
w += 80;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 4;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 48);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 64);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,574 | 32.894231 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
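    // Single-accumulator variant: all nine taps feed one dependency chain per
    // register; the _acc2 flavor of this kernel splits them across two instead.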
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
const __m128 vk3x4567 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi3x4567, vk3x4567));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
const __m128 vk4x4567 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
i5 += 8;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
const __m128 vk5x4567 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi5x4567, vk5x4567));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
i6 += 8;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
const __m128 vk6x4567 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
i7 += 8;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
const __m128 vk7x4567 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi7x4567, vk7x4567));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vi8x4567 = _mm_loadu_ps(i8 + 4);
i8 += 8;
const __m128 vk8x0123 = _mm_load_ps(w + 72);
const __m128 vk8x4567 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi8x4567, vk8x4567));
w += 80;
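      // One full weight group (8 bias + 9 taps x 8 = 80 floats) consumed; w now
      // points at the group for the next 8 channels.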
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 48);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 64);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
w += 4;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 48);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 64);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
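    // output_increment adds whatever per-pixel stride remains beyond the
    // elements just stored, so the same kernel serves differently strided
    // output layouts.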
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,264 | 32.655738 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmrelaxedsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
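  // Relaxed min/max: lane results are implementation-defined when an input is
  // NaN or when -0 and +0 are compared, a looseness this kernel trades for
  // speed; the clamp bounds themselves are finite values from params.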
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
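    // Non-FMA relaxed variant: the multiply-accumulate is still an exact
    // wasm_f32x4_mul / wasm_f32x4_add pair; only the final clamp uses relaxed ops.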
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,316 | 35.15655 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmrelaxedsimd-fma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
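    // __builtin_wasm_relaxed_madd_f32x4 may lower to a fused (single-rounding)
    // or unfused multiply-add depending on the host, so results can differ
    // slightly across platforms.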
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
vacc4567p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x4567, vk4x4567, vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
vacc4567p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x4567, vk5x4567, vacc4567p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x4567, vk6x4567, vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
vacc4567p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x4567, vk7x4567, vacc4567p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x4567, vk8x4567, vacc4567p0);
w += 80;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 4;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,416 | 35.476038 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x4567, vk4x4567, vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x4567, vk5x4567, vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x4567, vk6x4567, vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x4567, vk7x4567, vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x4567, vk8x4567, vacc4567p0);
w += 80;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
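      // Piecewise store of the 1-3 valid channels: store64_lane writes the low
      // two lanes, the shuffle moves lanes 2-3 down, and store32_lane writes one.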
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,118 | 35.336601 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
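    // Per output pixel: gather the 9 row pointers above, then sweep the
    // channels in tiles of 8, 4, and a 1-3 channel tail before re-basing output.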
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
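      // Relaxed f32x4 min/max pick whatever single native instruction is fastest;
      // unlike the strict wasm_f32x4_min/max, their NaN and signed-zero behavior is
      // implementation-defined.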
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
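      // 4-channel remainder: the weights keep the 8-wide packing, so each tap is read
      // 8 floats apart while w itself advances by only 4 per group.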
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
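      // Tail of 1-3 channels: full 16-byte loads are safe because the kernel is
      // declared XNN_OOB_READS; only the valid lanes are stored below.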
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,006 | 34.970588 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
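  // The _arm variants clamp with the strict (NaN-propagating) wasm_f32x4_min/max,
  // which lower to single instructions on ARM targets.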
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
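      // acc2 variant: even taps accumulate into the p0 registers and odd taps into p1,
      // splitting the dependency chain; the partial sums are folded after the last tap.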
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
      // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,169 | 34.686901 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
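    // Step the indirection buffer to the next output pixel's 9 input row pointers;
    // the packed weights are re-read from the start for every pixel.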
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
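      // Store only the 1-3 valid lanes: a 64-bit lane store for a pair, shuffle the
      // upper half down, then a 32-bit lane store for an odd channel.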
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,859 | 34.490196 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
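  // The _x86 variants clamp with pmin/pmax ("pseudo" min/max, defined as b < a ? b : a),
  // which compile to single minps/maxps instructions on x86.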
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
      // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,177 | 34.71246 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_9p8c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
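  // Each clamp bound is stored twice in the params struct, so a 64-bit splat load
  // fills all four lanes with the same value.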
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,867 | 34.51634 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
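      // relaxed_madd may lower to a fused multiply-add (single rounding) or to separate
      // mul+add, so the low-order result bits can vary between engines; this _fma
      // variant accepts that in exchange for speed.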
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x4567, vk3x4567, vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x4567, vk4x4567, vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x4567, vk5x4567, vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x4567, vk6x4567, vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x4567, vk7x4567, vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x4567, vk8x4567, vacc4567p0);
w += 80;
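      // Unclamped kernel: the accumulators are stored directly, with no min/max bounds.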
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,565 | 34.337793 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-wasmsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p8c__wasmsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
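  // Unclamped acc2 variant: no min/max params are read, and each 4-lane half keeps two
  // accumulators (p0/p1) that alternate between taps to shorten the dependency chain.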
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
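    // Weights are packed per 8-channel group: 8 biases, then 9 kernel taps of
    // 8 channels each (80 floats per group), hence the fixed w + 8*k offsets.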
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
vacc4567p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
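      // Advance past this group's packed weights: 8 biases + 9 taps x 8 channels.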
w += 80;
      // Add up all accumulators to vacc0123p0 and vacc4567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
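      // Advance by only 4 so the same tap offsets (w + 8, w + 16, ...) now
      // address the upper 4 lanes of this 8-channel weight group.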
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
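/*
 * Hedged usage sketch, not part of the generated kernel: one way to drive the
 * unipass microkernel above for a single output pixel of 8 channels. The
 * buffer shapes and zeroed weights are illustrative assumptions; real callers
 * go through XNNPACK's operator setup, which packs the weights and builds the
 * indirection buffer.
 */
#include <string.h>

void example_dwconv_9p8c_acc2(void) {
  float zero_row[8];   // substitute row for out-of-bounds taps
  float rows[9][8];    // 9 indirected input rows, 8 channels each
  float w[80];         // 8 biases followed by 9 taps x 8 channels
  float out[8];
  memset(zero_row, 0, sizeof(zero_row));
  memset(rows, 0, sizeof(rows));
  memset(w, 0, sizeof(w));
  const float* indirection[9] = {
    rows[0], rows[1], rows[2], rows[3], rows[4],
    rows[5], rows[6], rows[7], rows[8],
  };
  union xnn_f32_default_params params;
  memset(&params, 0, sizeof(params));  // this kernel applies no output clamping
  xnn_f32_dwconv_ukernel_9p8c__wasmsimd_acc2(
      /*channels=*/8, /*output_width=*/1, indirection, w, out,
      /*input_stride=*/0, /*output_increment=*/0, /*input_offset=*/0,
      zero_row, &params);
}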
| 10,756 | 34.153595 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-9p8c-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_9p8c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
const v128_t vk3x4567 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vk3x4567), vacc4567p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vi4x4567 = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
const v128_t vk4x4567 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x4567, vk4x4567), vacc4567p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vi5x4567 = wasm_v128_load(i5 + 4);
i5 += 8;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
const v128_t vk5x4567 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x4567, vk5x4567), vacc4567p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vi6x4567 = wasm_v128_load(i6 + 4);
i6 += 8;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
const v128_t vk6x4567 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x4567, vk6x4567), vacc4567p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vi7x4567 = wasm_v128_load(i7 + 4);
i7 += 8;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
const v128_t vk7x4567 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x4567, vk7x4567), vacc4567p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vi8x4567 = wasm_v128_load(i8 + 4);
i8 += 8;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
const v128_t vk8x4567 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x4567, vk8x4567), vacc4567p0);
w += 80;
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,446 | 33.939799 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
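  // neon_stride1.mask zeroes the input lanes past the true row width in the
  // final (partial) block of up to 4 pixels.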
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
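  // Each row pointer advances by input_width rounded up to a whole 4-float
  // block; subtracting input_decrement rewinds it to the start of its row.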
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
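      // acc2: start a second accumulator so the multiply-adds form two
      // independent dependency chains; they are summed before the store.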
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
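      // Build the window shifted one pixel left (x3456) for the kernel's
      // left-column taps.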
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
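      // Build the window shifted one pixel right (x5678) for the right-column
      // taps, borrowing the first pixel of the next block.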
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
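      // Mask off lanes beyond the row width so the over-read pixels contribute
      // zero to the remaining outputs.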
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
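/*
 * Hedged usage sketch, not part of the generated sources: driving the 3x3
 * stride-1 CHW microkernel above on one 4x4 single-channel plane. The
 * xnn_f32_chw_params field layout is inferred from the accesses in the kernel,
 * and the all-ones mask is valid only because input_width here is an exact
 * multiple of 4 floats; real callers rely on XNNPACK's operator setup instead.
 */
#include <math.h>
#include <string.h>

void example_dwconv2d_chw_3x3p1(void) {
  float input[4 * 4];    // 4 rows of 4 pixels (input_width is given in bytes)
  float output[4 * 4];   // same spatial extent: stride 1, padding 1
  float weights[10];     // bias followed by the 9 kernel taps
  float zero_row[4];     // substitute row for the implicit top/bottom padding
  memset(input, 0, sizeof(input));
  memset(weights, 0, sizeof(weights));
  memset(zero_row, 0, sizeof(zero_row));
  union xnn_f32_chw_params params;
  memset(&params, 0, sizeof(params));
  params.neon_stride1.mask[0] = 0xFFFFFFFFu;  // keep all 4 lanes: the row
  params.neon_stride1.mask[1] = 0xFFFFFFFFu;  // width is a full 4-pixel block
  params.neon_stride1.mask[2] = 0xFFFFFFFFu;
  params.neon_stride1.mask[3] = 0xFFFFFFFFu;
  params.neon_stride1.min = -INFINITY;        // no output clamping
  params.neon_stride1.max = +INFINITY;
  xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_1x4_acc2(
      /*input_height=*/4, /*input_width=*/4 * sizeof(float),
      input, weights, zero_row, output, /*padding_top=*/1, &params);
}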
| 5,833 | 31.775281 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
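      // acc3: three independent accumulators (vo0p0..vo0p2) interleave the
      // multiply-add chains and are reduced just before the clamp.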
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x3456, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x3456, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,921 | 31.9 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
float32x4_t vo0p3 = vmulq_lane_f32(vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_high_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
float32x4_t vo0p3 = vmulq_lane_f32(vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_high_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,009 | 32.021978 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,740 | 31.619318 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
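    // Two output rows per pass: row r reads input rows r-1..r+1 and row r+1
    // reads rows r..r+2, so the shared middle rows are loaded once.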
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
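    // doz() is a saturating difference-or-zero subtract, so the row count
    // cannot wrap below zero when the final pass covers a single row.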
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,648 | 36.604348 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,467 | 36.469027 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
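    // 3x4 variant: three output rows per pass over five input rows (i0..i4).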
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
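      // Zero the lanes past the row end so the out-of-bounds reads contribute
      // nothing to the accumulators.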
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
if (w & (2 * sizeof(float))) {
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
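    // Slide the input window down 3 rows: i0/i1 rewind the columns consumed this
    // pass (input_decrement); the remaining rows re-derive from them.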
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11140 | 39.660584 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-4x4.c |

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
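    // 4-row variant: each pass reads 6 input rows (i0..i5) and writes 4 output
    // rows; short-height aliasing works as in the 3-row kernel above.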
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
if (w & (2 * sizeof(float))) {
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
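    // Slide the input window down 4 rows and advance the output pointers 4 rows.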
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 13813 | 41.900621 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-5x4.c |

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
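    // 5-row variant: each pass reads 7 input rows (i0..i6) and writes 5 output rows.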
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi6x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vi6x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vi6x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi6x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
float32x2_t vo4_lo = vget_low_f32(vo4);
if (w & (2 * sizeof(float))) {
vst1_f32(o4, vo4_lo); o4 += 2;
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
vo4_lo = vget_high_f32(vo4);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o4, vo4_lo, 0); o4 += 1;
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
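    // Slide the input window down 5 rows and advance the output pointers 5 rows.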
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 16486 | 43.559459 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-aarch64-neonfma-6x4.c |

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__aarch64_neonfma_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
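    // 6-row variant: each pass reads 8 input rows (i0..i7) and writes 6 output rows.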
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi6x0123 = vmovq_n_f32(0.0f);
float32x4_t vi7x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo5p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
const float32x4_t vi7x3456 = vextq_f32(vi7x0123, vi7x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vi6x89AB, 1);
const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
float32x4_t vo5 = vmaxq_f32(vo5p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vo5 = vminq_f32(vo5, vmax);
vst1q_f32(o5, vo5); o5 += 4;
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo5p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vi6x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi6x4567)));
vi7x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x4567, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x4567, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
const float32x4_t vi7x3456 = vextq_f32(vi7x0123, vi7x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x3456, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x3456, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vzero, 1);
const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi5x5678, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vfmaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vfmaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vfmaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo5p0 = vfmaq_lane_f32(vo5p0, vi6x5678, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vfmaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vfmaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vfmaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vo5p0 = vfmaq_lane_f32(vo5p0, vi7x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
float32x4_t vo5 = vmaxq_f32(vo5p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vo5 = vminq_f32(vo5, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o5, vo5); o5 += 4;
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
float32x2_t vo4_lo = vget_low_f32(vo4);
float32x2_t vo5_lo = vget_low_f32(vo5);
if (w & (2 * sizeof(float))) {
vst1_f32(o5, vo5_lo); o5 += 2;
vst1_f32(o4, vo4_lo); o4 += 2;
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
vo4_lo = vget_high_f32(vo4);
vo5_lo = vget_high_f32(vo5);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o5, vo5_lo, 0); o5 += 1;
vst1_lane_f32(o4, vo4_lo, 0); o4 += 1;
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
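    // Slide the input window down 6 rows and advance the output pointers 6 rows.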
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 19159 | 44.837321 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-1x4-acc2.c |

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
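    // Single-row variant: one output row per pass; "acc2" splits the nine taps
    // across two partial accumulators to shorten the vmla dependency chain.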
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
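      // acc2: merge the two partial accumulators before clamping.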
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
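    // Slide the window down one output row: the old row 1 becomes the new row 0.
    // input_decrement rewinds the pointers past their 4-float-aligned overshoot
    // back to the start of the row they just traversed.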
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,822 | 31.713483 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
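      // acc3 variant: the nine multiply-accumulates are spread over three independent
      // accumulators (vo0p0..vo0p2) to shorten the dependency chain; they are summed
      // just before clamping.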
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x3456, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x3456, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,910 | 31.838889 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
float32x4_t vo0p3 = vmulq_lane_f32(vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_high_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
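      // Reduce the four partial accumulators pairwise to keep the addition tree shallow.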
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
float32x4_t vo0p3 = vmulq_lane_f32(vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_high_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,998 | 31.961538 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
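  // Weights layout: weights[0] is the bias and weights[1..9] hold the 3x3 kernel in
  // row-major order, so vw0123 = {bias, k00, k01, k02}, vw4567 = {k10, k11, k12, k20},
  // and vw89 = {k21, k22}.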
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 5,729 | 31.556818 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
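  // With unit padding the output rows are the same width as the input rows, so the
  // output row stride is also input_width bytes.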
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw4567), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_low_f32(vw0123), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x5678, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
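    // doz() is a difference-or-zero (saturating) subtraction, so output_height
    // cannot wrap below zero when fewer than 2 rows remain.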
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,637 | 36.556522 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
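    // For short tails the checks above substitute the zero row for the missing inputs
    // and alias the surplus output pointer onto the last valid row; that row's store
    // is issued last, so the duplicate result is overwritten.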
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,456 | 36.420354 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
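  // Three output rows per pass: input rows r-1..r+3 are needed, so five input row
  // pointers (i0..i4) stay live.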
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
if (w & (2 * sizeof(float))) {
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11,129 | 39.620438 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
if (w & (2 * sizeof(float))) {
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 13,802 | 41.86646 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-5x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi6x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vi6x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vi6x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi6x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
float32x2_t vo4_lo = vget_low_f32(vo4);
if (w & (2 * sizeof(float))) {
vst1_f32(o4, vo4_lo); o4 += 2;
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
vo4_lo = vget_high_f32(vo4);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o4, vo4_lo, 0); o4 += 1;
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 16,475 | 43.52973 | 90 | c |
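
The 5x4 kernel above never reloads overlapping input: each shifted 3-tap window is synthesized from two adjacent 4-float registers with `vextq_f32`, which concatenates its operands and extracts four consecutive lanes at a constant offset. A small sketch of the two window shapes it builds (lane values are arbitrary; the zero second operand models the right edge, as in the kernel's remainder block):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const float prev[4] = {0.0f, 1.0f, 2.0f, 3.0f};  // columns 0..3
  const float curr[4] = {4.0f, 5.0f, 6.0f, 7.0f};  // columns 4..7
  const float32x4_t vx0123 = vld1q_f32(prev);
  const float32x4_t vx4567 = vld1q_f32(curr);

  // Window one column to the left: lanes 3,4,5,6.
  const float32x4_t vx3456 = vextq_f32(vx0123, vx4567, 3);
  // Window one column to the right; a zero operand models the right
  // edge (lanes 5,6,7,0), exactly as in the kernels' remainder block.
  const float32x4_t vx5678 = vextq_f32(vx4567, vmovq_n_f32(0.0f), 1);

  float out[4];
  vst1q_f32(out, vx3456);
  printf("x3456 = %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  vst1q_f32(out, vx5678);
  printf("x5678 = %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}
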
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-neon-6x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi6x0123 = vmovq_n_f32(0.0f);
float32x4_t vi7x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo5p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
const float32x4_t vi7x3456 = vextq_f32(vi7x0123, vi7x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x3456, vget_high_f32(vw4567), 1);
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vi6x89AB, 1);
const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x5678, vw89, 1);
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
float32x4_t vo5 = vmaxq_f32(vo5p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vo5 = vminq_f32(vo5, vmax);
vst1q_f32(o5, vo5); o5 += 4;
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo3p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo4p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo5p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vi6x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi6x4567)));
vi7x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x4567, vget_high_f32(vw0123), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x4567, vget_high_f32(vw0123), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x4567, vget_high_f32(vw0123), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x4567, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x4567, vget_low_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x4567, vget_low_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x4567, vget_low_f32(vw4567), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x4567, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vw89, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vw89, 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x4567, vw89, 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x4567, vw89, 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x4567, vw89, 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x4567, vw89, 0);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
const float32x4_t vi6x3456 = vextq_f32(vi6x0123, vi6x4567, 3);
const float32x4_t vi7x3456 = vextq_f32(vi7x0123, vi7x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_low_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x3456, vget_low_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x3456, vget_low_f32(vw0123), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x3456, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x3456, vget_low_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x3456, vget_low_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x3456, vget_low_f32(vw4567), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x3456, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_high_f32(vw4567), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x3456, vget_high_f32(vw4567), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x3456, vget_high_f32(vw4567), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x3456, vget_high_f32(vw4567), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x3456, vget_high_f32(vw4567), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
const float32x4_t vi6x5678 = vextq_f32(vi6x4567, vzero, 1);
const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_high_f32(vw0123), 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi2x5678, vget_high_f32(vw0123), 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi3x5678, vget_high_f32(vw0123), 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi4x5678, vget_high_f32(vw0123), 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi5x5678, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_high_f32(vw4567), 0);
vo2p0 = vmlaq_lane_f32(vo2p0, vi3x5678, vget_high_f32(vw4567), 0);
vo3p0 = vmlaq_lane_f32(vo3p0, vi4x5678, vget_high_f32(vw4567), 0);
vo4p0 = vmlaq_lane_f32(vo4p0, vi5x5678, vget_high_f32(vw4567), 0);
vo5p0 = vmlaq_lane_f32(vo5p0, vi6x5678, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vw89, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vw89, 1);
vo2p0 = vmlaq_lane_f32(vo2p0, vi4x5678, vw89, 1);
vo3p0 = vmlaq_lane_f32(vo3p0, vi5x5678, vw89, 1);
vo4p0 = vmlaq_lane_f32(vo4p0, vi6x5678, vw89, 1);
vo5p0 = vmlaq_lane_f32(vo5p0, vi7x5678, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);
float32x4_t vo3 = vmaxq_f32(vo3p0, vmin);
float32x4_t vo4 = vmaxq_f32(vo4p0, vmin);
float32x4_t vo5 = vmaxq_f32(vo5p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vo2 = vminq_f32(vo2, vmax);
vo3 = vminq_f32(vo3, vmax);
vo4 = vminq_f32(vo4, vmax);
vo5 = vminq_f32(vo5, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
vst1q_f32(o5, vo5); o5 += 4;
vst1q_f32(o4, vo4); o4 += 4;
vst1q_f32(o3, vo3); o3 += 4;
vst1q_f32(o2, vo2); o2 += 4;
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
float32x2_t vo2_lo = vget_low_f32(vo2);
float32x2_t vo3_lo = vget_low_f32(vo3);
float32x2_t vo4_lo = vget_low_f32(vo4);
float32x2_t vo5_lo = vget_low_f32(vo5);
if (w & (2 * sizeof(float))) {
vst1_f32(o5, vo5_lo); o5 += 2;
vst1_f32(o4, vo4_lo); o4 += 2;
vst1_f32(o3, vo3_lo); o3 += 2;
vst1_f32(o2, vo2_lo); o2 += 2;
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
vo2_lo = vget_high_f32(vo2);
vo3_lo = vget_high_f32(vo3);
vo4_lo = vget_high_f32(vo4);
vo5_lo = vget_high_f32(vo5);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o5, vo5_lo, 0); o5 += 1;
vst1_lane_f32(o4, vo4_lo, 0); o4 += 1;
vst1_lane_f32(o3, vo3_lo, 0); o3 += 1;
vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 19,148 | 44.811005 | 90 | c |
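
In both NEON kernels the remainder columns are handled by over-reading a full vector (hence the XNN_OOB_READS annotation) and ANDing it with `params->neon_stride1.mask`, whose 32-bit lanes are all-ones for valid columns and zero past the row end, so out-of-bounds lanes contribute exact zeros to every multiply-add. The mask itself is precomputed when the params struct is initialized; the construction below is an assumption for illustration only:

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Suppose only 2 of the last 4 columns are valid.
  const int valid = 2;
  uint32_t mask_bits[4];
  for (int i = 0; i < 4; i++) {
    mask_bits[i] = i < valid ? UINT32_MAX : 0;
  }
  const uint32x4_t vmask = vld1q_u32(mask_bits);

  const float tail[4] = {1.0f, 2.0f, 99.0f, 99.0f};  // 99s are out of bounds
  float32x4_t vi = vld1q_f32(tail);
  // Same bit-level AND as the kernels' remainder block: invalid lanes
  // become +0.0f and drop out of the subsequent multiply-adds.
  vi = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi)));

  float out[4];
  vst1q_f32(out, vi);
  printf("masked tail = %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}
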
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-1x1-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
} while (--output_height != 0);
}
| 2,886 | 22.282258 | 74 | c |
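
The `_acc2` suffix on the scalar kernel above means its nine multiply-adds are split across two accumulators (`vo0p0`, `vo0p1`) that are merged once at the end; the two independent dependency chains let the FPU overlap multiply-add latency instead of serializing on one register. A scalar sketch of the same transformation on a 9-term dot product (`dot9_*` are illustrative names, not XNNPACK APIs):

#include <stdio.h>

// One accumulator: 9 serially dependent multiply-adds.
static float dot9_acc1(const float* x, const float* k) {
  float acc = 0.0f;
  for (int i = 0; i < 9; i++) acc += x[i] * k[i];
  return acc;
}

// Two accumulators: two independent chains, merged once at the end.
static float dot9_acc2(const float* x, const float* k) {
  float acc0 = 0.0f;
  float acc1 = 0.0f;
  for (int i = 0; i < 8; i += 2) {
    acc0 += x[i] * k[i];
    acc1 += x[i + 1] * k[i + 1];
  }
  acc0 += x[8] * k[8];
  return acc0 + acc1;  // the single "vo0p0 += vo0p1" merge
}

int main(void) {
  const float x[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const float k[9] = {9, 8, 7, 6, 5, 4, 3, 2, 1};
  printf("acc1=%g acc2=%g\n", dot9_acc1(x, k), dot9_acc2(x, k));
  return 0;
}
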
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-1x1-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
} while (--output_height != 0);
}
| 2,940 | 22.34127 | 74 | c |
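
All of the scalar variants keep a rotating 3-column window per input row (`vi*x0`, `vi*x1`, `vi*x2`): x0 starts at 0.0f to realize the left padding, exactly one new value is loaded per row per output pixel, and the window shifts left after each pixel. A minimal sketch of that rotation for a single 5-column row:

#include <stdio.h>

int main(void) {
  const float row[5] = {10.0f, 20.0f, 30.0f, 40.0f, 50.0f};
  const float* i0 = row;

  float x0 = 0.0f;   // implicit left padding column
  float x1 = *i0++;  // first real column
  for (int col = 0; col < 4; col++) {
    const float x2 = *i0++;  // one new load per pixel
    printf("window at col %d: %g %g %g\n", col, x0, x1, x2);
    x0 = x1;                 // rotate the window
    x1 = x2;
  }
  // Last pixel: the missing right column is implicitly zero, which is
  // why the kernels handle it in a separate block after the main loop.
  printf("window at col 4: %g %g 0\n", x0, x1);
  return 0;
}
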
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-1x1-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
float vo0p3 = vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
} while (--output_height != 0);
}
| 2,994 | 22.398438 | 74 | c |
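
Every 3x3, stride-1, padding-1 variant in this family computes the same result, which makes a naive reference convenient for validation. A sketch under the same weight layout (bias first, then the nine taps row-major); note that the kernels above measure input_width in bytes, while this reference uses element counts, and `dwconv2d_3x3p1_ref` is an illustrative name:

#include <stddef.h>
#include <stdio.h>

// Naive reference: 3x3 depthwise conv, stride 1, padding 1, one channel.
// weights[0] = bias, weights[1..9] = taps in row-major order (vk00..vk22).
static void dwconv2d_3x3p1_ref(size_t height, size_t width,
                               const float* input, const float* weights,
                               float* output, float vmin, float vmax) {
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float acc = weights[0];
      for (int ky = 0; ky < 3; ky++) {
        for (int kx = 0; kx < 3; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) oy + ky - 1;
          const ptrdiff_t ix = (ptrdiff_t) ox + kx - 1;
          // Out-of-range taps read the implicit zero padding.
          if (iy >= 0 && iy < (ptrdiff_t) height &&
              ix >= 0 && ix < (ptrdiff_t) width) {
            acc += input[iy * width + ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      if (acc < vmin) acc = vmin;
      if (acc > vmax) acc = vmax;
      output[oy * width + ox] = acc;
    }
  }
}

int main(void) {
  const float input[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const float weights[10] = {0.5f, /* bias */
                             1, 0, 0, 0, 1, 0, 0, 0, 1};
  float output[9];
  dwconv2d_3x3p1_ref(3, 3, input, weights, output, 0.0f, 30.0f);
  for (int i = 0; i < 9; i++) printf("%g ", output[i]);
  printf("\n");
  return 0;
}
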
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-1x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_1x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
} while (--output_height != 0);
}
| 2,827 | 22.180328 | 74 | c |
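
Each output value passes through the same two-step clamp: `math_max_f32` against the lower bound, then `math_min_f32` against the upper bound, which together implement the fused minmax activation (e.g. ReLU6 with min 0 and max 6). A stand-alone sketch using the portable fmaxf/fminf in place of XNNPACK's math_* helpers:

#include <math.h>
#include <stdio.h>

// Two-step clamp into [vmin, vmax], mirroring the max-then-min
// sequence applied to every accumulator above.
static float clamp_minmax(float v, float vmin, float vmax) {
  v = fmaxf(v, vmin);
  return fminf(v, vmax);
}

int main(void) {
  printf("%g %g %g\n",
         clamp_minmax(-3.0f, 0.0f, 6.0f),  // -> 0   (lower edge)
         clamp_minmax(2.5f, 0.0f, 6.0f),   // -> 2.5 (inside range)
         clamp_minmax(9.0f, 0.0f, 6.0f));  // -> 6   (upper edge)
  return 0;
}
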
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-2x1-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i3 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 4,205 | 24.646341 | 74 | c |
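
The 2x1 variant just closed computes two output rows per pass, so the input rows shared by adjacent outputs are loaded once, and the end-of-pass rebasing (i0 = i2 - input_width, o0 = o1) slides the whole window down two rows. A sketch of the row-index bookkeeping only (no arithmetic):

#include <stdio.h>

int main(void) {
  // With a 3x3 kernel and padding_top == 1, output row r reads input
  // rows r-1, r, r+1 (out-of-range rows come from the zero buffer).
  // A 2-row tile reads rows r-1..r+2 and shares rows r and r+1.
  const int output_height = 5;
  for (int r = 0; r < output_height; r += 2) {
    const int r1 = (r + 1 < output_height) ? r + 1 : r;  // the o1 = o0 fixup
    printf("tile: outputs %d and %d, input rows %d..%d\n",
           r, r1, r - 1, r1 + 1);
  }
  return 0;
}
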