| repo (stringlengths 1-152, ⌀) | file (stringlengths 14-221) | code (stringlengths 501-25k) | file_length (int64 501-25k) | avg_line_length (float64 20-99.5) | max_line_length (int64 21-134) | extension_type (stringclasses, 2 values) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-2x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
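// This variant produces 2 output rows per pass of the outer loop. Rows i0..i3
// cover the two 3x3 input windows; i0 starts at the caller-provided zero row
// because padding_top == 1. input_width is a byte count (asserted to be a
// multiple of sizeof(float)) and doubles as the byte stride between rows.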
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
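// Left edge: the column to the left of the image is implicit zero padding, so
// the x0 taps start at 0.0f while x1 holds the first real column of each row.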
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
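// Rotate the row pointers for the next pair of output rows: each pointer was
// advanced by one full row above, so the new i0/i1 are the old i2/i3. doz()
// (difference-or-zero) subtracts 2 from the remaining height, saturating at 0.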
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i3 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 4,092 | 24.58125 | 74 | c |
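For orientation, here is a minimal sketch of how a ukernel with the signature above might be driven. It is hypothetical: the buffer sizes follow only the asserts visible in the kernel (input_width is a byte count and padding_top must be 1), and the direct initialization of params.scalar.min/max mirrors the field accesses in the kernel body rather than XNNPACK's actual params-init helpers.

```c
#include <stdlib.h>

#include <xnnpack/dwconv.h>  // declares the ukernel; params union header layout is assumed

// Hypothetical driver for the scalar 2x1 ukernel above. Sizes and the direct
// params initialization are assumptions based on the kernel's asserts and code.
int main(void) {
  const size_t height = 5;
  const size_t width = 8;                          // pixels per row
  const size_t row_bytes = width * sizeof(float);  // passed as input_width (bytes)
  float weights[10] = {0.0f};                      // bias followed by the 3x3 kernel
  float* input = calloc(height * width, sizeof(float));
  float* output = calloc(height * width, sizeof(float));
  float* zero = calloc(width, sizeof(float));      // zeroed padding row
  union xnn_f32_chw_params params;
  params.scalar.min = -3.4e38f;                    // effectively no clamping
  params.scalar.max = +3.4e38f;
  xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_2x1(
      height, row_bytes, input, weights, zero, output,
      /*padding_top=*/1, &params);
  free(zero);
  free(output);
  free(input);
  return 0;
}
```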
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-3x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_3x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
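// When fewer than 3 output rows remain, point the out-of-range input rows at the
// zero buffer (bottom padding) and alias the surplus output pointers, so their
// stores are overwritten by the store of the last valid row.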
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
float vi4x1 = *i4++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
const float vi4x2 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i3 - input_width);
i1 = (const float*) ((uintptr_t) i4 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 5,191 | 25.762887 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-4x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_4x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
float vi4x1 = *i4++;
float vi5x1 = *i5++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
const float vi4x2 = *i4++;
const float vi5x2 = *i5++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo3p0 += vi3x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo3p0 += vi4x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo3p0 += vi5x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i4 - input_width);
i1 = (const float*) ((uintptr_t) i5 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 6,290 | 26.592105 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-5x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_5x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi6x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
float vi4x1 = *i4++;
float vi5x1 = *i5++;
float vi6x1 = *i6++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
const float vi4x2 = *i4++;
const float vi5x2 = *i5++;
const float vi6x2 = *i6++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
float vo4p0 = vbias + vi4x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo4p0 += vi5x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vo4p0 += vi6x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo4p0 += vi4x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo4p0 += vi5x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
vo4p0 += vi6x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo3p0 += vi3x2 * vk02;
vo4p0 += vi4x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo3p0 += vi4x2 * vk12;
vo4p0 += vi5x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo3p0 += vi5x2 * vk22;
vo4p0 += vi6x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
float vo4 = math_max_f32(vo4p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
vo4 = math_min_f32(vo4, vmax);
*o4++ = vo4;
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
float vo4p0 = vbias + vi4x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo4p0 += vi5x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vo4p0 += vi6x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo4p0 += vi4x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo4p0 += vi5x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
vo4p0 += vi6x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
float vo4 = math_max_f32(vo4p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
vo4 = math_min_f32(vo4, vmax);
*o4++ = vo4;
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i5 - input_width);
i1 = (const float*) ((uintptr_t) i6 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 7,389 | 27.206107 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-scalar-6x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__scalar_6x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
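// Wider row tiling: with 6 output rows in flight, each loaded input element can
// feed up to three accumulators (the rows above, at, and below it), so input
// rows are re-read less often than in the narrower variants, at the cost of
// keeping more scalars live per iteration.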
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi6x0 = 0.0f;
float vi7x0 = 0.0f;
float vi0x1 = *i0++;
float vi1x1 = *i1++;
float vi2x1 = *i2++;
float vi3x1 = *i3++;
float vi4x1 = *i4++;
float vi5x1 = *i5++;
float vi6x1 = *i6++;
float vi7x1 = *i7++;
size_t w = input_width;
for (; w > 1 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x2 = *i0++;
const float vi1x2 = *i1++;
const float vi2x2 = *i2++;
const float vi3x2 = *i3++;
const float vi4x2 = *i4++;
const float vi5x2 = *i5++;
const float vi6x2 = *i6++;
const float vi7x2 = *i7++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
float vo4p0 = vbias + vi4x0 * vk00;
float vo5p0 = vbias + vi5x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo4p0 += vi5x0 * vk10;
vo5p0 += vi6x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vo4p0 += vi6x0 * vk20;
vo5p0 += vi7x0 * vk20;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vi7x0 = vi7x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo4p0 += vi4x1 * vk01;
vo5p0 += vi5x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo4p0 += vi5x1 * vk11;
vo5p0 += vi6x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
vo4p0 += vi6x1 * vk21;
vo5p0 += vi7x1 * vk21;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vi7x1 = vi7x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo3p0 += vi3x2 * vk02;
vo4p0 += vi4x2 * vk02;
vo5p0 += vi5x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo3p0 += vi4x2 * vk12;
vo4p0 += vi5x2 * vk12;
vo5p0 += vi6x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo3p0 += vi5x2 * vk22;
vo4p0 += vi6x2 * vk22;
vo5p0 += vi7x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
float vo4 = math_max_f32(vo4p0, vmin);
float vo5 = math_max_f32(vo5p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
vo4 = math_min_f32(vo4, vmax);
vo5 = math_min_f32(vo5, vmax);
*o5++ = vo5;
*o4++ = vo4;
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
// Always process the last pixel separately to account for right edge.
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo3p0 = vbias + vi3x0 * vk00;
float vo4p0 = vbias + vi4x0 * vk00;
float vo5p0 = vbias + vi5x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo3p0 += vi4x0 * vk10;
vo4p0 += vi5x0 * vk10;
vo5p0 += vi6x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo3p0 += vi5x0 * vk20;
vo4p0 += vi6x0 * vk20;
vo5p0 += vi7x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo3p0 += vi3x1 * vk01;
vo4p0 += vi4x1 * vk01;
vo5p0 += vi5x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo3p0 += vi4x1 * vk11;
vo4p0 += vi5x1 * vk11;
vo5p0 += vi6x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo3p0 += vi5x1 * vk21;
vo4p0 += vi6x1 * vk21;
vo5p0 += vi7x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
float vo3 = math_max_f32(vo3p0, vmin);
float vo4 = math_max_f32(vo4p0, vmin);
float vo5 = math_max_f32(vo5p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
vo3 = math_min_f32(vo3, vmax);
vo4 = math_min_f32(vo4, vmax);
vo5 = math_min_f32(vo5, vmax);
*o5++ = vo5;
*o4++ = vo4;
*o3++ = vo3;
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i6 - input_width);
i1 = (const float*) ((uintptr_t) i7 - input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 8,488 | 27.679054 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
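// Register naming: viRxABCD holds row R, columns A..D of the current 4-pixel
// block (lane contents listed high-to-low in the comments below). The x3012
// registers start at zero because the columns left of the image are padding.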
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
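// Two independent accumulators (vo0p0, vo0p1) shorten the dependency chain of
// the nine multiply-adds; they are summed once before clamping.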
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
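// Build the left-shifted window (columns 3..6): rotate the current block and
// splice in the last column of the previous block with _mm_move_ss.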
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
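// Remainder of 1..4 pixels: vmask zeroes the lanes past the last valid column,
// so values read beyond the row end (allowed by XNN_OOB_READS) cannot affect
// the output, and the partial store below writes only the surviving pixels.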
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
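// Rewind and slide down one row: each input pointer advanced by input_width
// rounded up to a whole 4-float block, so subtracting input_decrement returns
// it to the start of its row before the window moves down (i0 <- old i1,
// i1 <- old i2).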
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 8,132 | 35.308036 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 8,188 | 35.234513 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
__m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
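// Reduce the four partial accumulators pairwise before clamping to [vmin, vmax].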
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
__m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 8,244 | 35.162281 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
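    // Step down one output row: rewind i0/i1 past the 4-float-aligned advance of this pass and re-derive i2 from i1.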
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
file_length: 8,071 | avg_line_length: 35.36036 | max_line_length: 90 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-2x4-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
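  // round_up_po2 pads the row stride to a whole number of 4-float tiles, so the row pointers
  // (which advance 4 floats at a time) can be rewound cleanly after each pass.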
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
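      // Second partial accumulator per output row (the acc2 variant); splitting the sum across
      // vo*p0 and vo*p1 shortens the chain of dependent adds.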
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
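      // Fold the two partial accumulators of each row together before clamping.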
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo1p0 = _mm_add_ps(vo1p0, vo1p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
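      // Zero the lanes past the end of the row: the loads above may have read beyond it,
      // which this kernel declares via XNN_OOB_READS.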
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo1p0 = _mm_add_ps(vo1p0, vo1p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
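    // doz() is a saturating (difference-or-zero) subtraction by the two rows produced in this pass.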
output_height = doz(output_height, 2);
} while (output_height != 0);
}
file_length: 11,413 | avg_line_length: 37.955631 | max_line_length: 90 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-2x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
size_t w = input_width;
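    // Main loop: compute 4 output pixels for each of the 2 rows per iteration;
    // the last 1..4 pixels of the row are handled after the loop.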
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
file_length: 11,296 | avg_line_length: 38.089965 | max_line_length: 90 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-3x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
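    // When fewer than 3 output rows remain, alias the surplus input rows to the zero row and the
    // surplus output pointers to the previous row so a single code path covers the tail.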
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
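    // The x3012 registers hold the pixel just left of the current tile; they start at zero to
    // supply the implicit left padding of the 3x3 kernel.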
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
__m128 vi3x3012 = _mm_setzero_ps();
// vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
__m128 vi4x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
vi4x3012 = vi4x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi4x8567 = ( vi47, vi46, vi45, vi48 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi4x8567 = ( vi47, vi46, vi45, 0.0 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
file_length: 14,467 | avg_line_length: 39.870056 | max_line_length: 90 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-4x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
__m128 vi3x3012 = _mm_setzero_ps();
// vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
__m128 vi4x3012 = _mm_setzero_ps();
// vi5x3012 = ( vi52, vi51, vi{M}0, vi{M}3 )
__m128 vi5x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
// vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
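      // Rotate each tile one lane to the right; _mm_move_ss below then inserts the pixel carried
      // over from the previous tile, yielding the window shifted one pixel left (x3456).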
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
vi4x3012 = vi4x7456;
vi5x3012 = vi5x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi4x8567 = ( vi47, vi46, vi45, vi48 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
// vi5x8567 = ( vi57, vi56, vi55, vi58 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi4x8567 = ( vi47, vi46, vi45, 0.0 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
// vi5x8567 = ( vi57, vi56, vi55, 0.0 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
file_length: 17,638 | avg_line_length: 41.097852 | max_line_length: 90 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-5x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
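  // Producing 5 output rows of a 3x3 stencil takes 7 input rows (i0..i6); i0 starts at the zero
  // row, which supplies the top padding.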
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
// vi0x3012 = ( vi02, vi01, vi{M}0, vi{M}3 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi{M}0, vi{M}3 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi{M}0, vi{M}3 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi{M}0, vi{M}3 )
__m128 vi3x3012 = _mm_setzero_ps();
// vi4x3012 = ( vi42, vi41, vi{M}0, vi{M}3 )
__m128 vi4x3012 = _mm_setzero_ps();
// vi5x3012 = ( vi52, vi51, vi{M}0, vi{M}3 )
__m128 vi5x3012 = _mm_setzero_ps();
// vi6x3012 = ( vi62, vi61, vi{M}0, vi{M}3 )
__m128 vi6x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vi6x4567 = _mm_loadu_ps(i6);
i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
// vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
// vi6x89AB = ( vi6B, vi6A, vi69, vi68 )
const __m128 vi6x89AB = _mm_loadu_ps(i6);
i6 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi6x7456 = ( vi66, vi65, vi64, vi67 )
const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
// vi6x3456 = ( vi66, vi65, vi64, vi63 )
const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
vi4x3012 = vi4x7456;
vi5x3012 = vi5x7456;
vi6x3012 = vi6x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi4x8567 = ( vi47, vi46, vi45, vi48 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
// vi5x8567 = ( vi57, vi56, vi55, vi58 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
// vi6x8567 = ( vi67, vi66, vi65, vi68 )
const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vi6x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi6x5678 = ( vi68, vi67, vi66, vi65 )
const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
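// Clamp the accumulated results to the requested [min, max] output range.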
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
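// Zero the lanes beyond the row tail so the (possibly out-of-bounds) loaded values do not contribute.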
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
vi6x4567 = _mm_and_ps(vmask, vi6x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi6x7456 = ( vi66, vi65, vi64, vi67 )
const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
// vi6x3456 = ( vi66, vi65, vi64, vi63 )
const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi4x8567 = ( vi47, vi46, vi45, 0.0 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
// vi5x8567 = ( vi57, vi56, vi55, 0.0 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
// vi6x8567 = ( vi67, vi66, vi65, 0.0 )
const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi6x5678 = ( vi68, vi67, vi66, vi65 )
const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o4, vo4);
o4 += 2;
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
vo4 = _mm_movehl_ps(vo4, vo4);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o4, vo4);
o4 += 1;
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
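// Rewind by the padded row stride (input_decrement) and slide the input/output window down 5 rows for the next stripe.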
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-sse-6x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
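// Broadcast the bias and the nine 3x3 taps so that each of the 4 vector lanes computes one output pixel.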
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
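// Rows beyond the remaining output height alias the zero buffer (inputs) or an earlier row (outputs),
// so the inner loops need no per-row bounds checks; valid rows are stored last and overwrite aliased writes.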
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
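// The carried left-neighbor vectors start at zero: the column to the left of the image is implicit zero padding.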
// vi0x3012 = ( vi02, vi01, vi00, vi03 )
__m128 vi0x3012 = _mm_setzero_ps();
// vi1x3012 = ( vi12, vi11, vi10, vi13 )
__m128 vi1x3012 = _mm_setzero_ps();
// vi2x3012 = ( vi22, vi21, vi20, vi23 )
__m128 vi2x3012 = _mm_setzero_ps();
// vi3x3012 = ( vi32, vi31, vi30, vi33 )
__m128 vi3x3012 = _mm_setzero_ps();
// vi4x3012 = ( vi42, vi41, vi40, vi43 )
__m128 vi4x3012 = _mm_setzero_ps();
// vi5x3012 = ( vi52, vi51, vi50, vi53 )
__m128 vi5x3012 = _mm_setzero_ps();
// vi6x3012 = ( vi62, vi61, vi60, vi63 )
__m128 vi6x3012 = _mm_setzero_ps();
// vi7x3012 = ( vi72, vi71, vi70, vi73 )
__m128 vi7x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vi6x4567 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vi7x4567 = _mm_loadu_ps(i7);
i7 += 4;
size_t w = input_width;
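// Main loop: each iteration consumes 4 input columns per row and produces a 6-row x 4-pixel block of outputs.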
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
// vi0x89AB = ( vi0B, vi0A, vi09, vi08 )
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
// vi1x89AB = ( vi1B, vi1A, vi19, vi18 )
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
// vi2x89AB = ( vi2B, vi2A, vi29, vi28 )
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
// vi3x89AB = ( vi3B, vi3A, vi39, vi38 )
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
// vi4x89AB = ( vi4B, vi4A, vi49, vi48 )
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
// vi5x89AB = ( vi5B, vi5A, vi59, vi58 )
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
// vi6x89AB = ( vi6B, vi6A, vi69, vi68 )
const __m128 vi6x89AB = _mm_loadu_ps(i6);
i6 += 4;
// vi7x89AB = ( vi7B, vi7A, vi79, vi78 )
const __m128 vi7x89AB = _mm_loadu_ps(i7);
i7 += 4;
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi6x7456 = ( vi66, vi65, vi64, vi67 )
const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi7x7456 = ( vi76, vi75, vi74, vi77 )
const __m128 vi7x7456 = _mm_shuffle_ps(vi7x4567, vi7x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
__m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
// vi6x3456 = ( vi66, vi65, vi64, vi63 )
const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
// vi7x3456 = ( vi76, vi75, vi74, vi73 )
const __m128 vi7x3456 = _mm_move_ss(vi7x7456, vi7x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
vi0x3012 = vi0x7456;
vi1x3012 = vi1x7456;
vi2x3012 = vi2x7456;
vi3x3012 = vi3x7456;
vi4x3012 = vi4x7456;
vi5x3012 = vi5x7456;
vi6x3012 = vi6x7456;
vi7x3012 = vi7x7456;
// vi0x8567 = ( vi07, vi06, vi05, vi08 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
// vi1x8567 = ( vi17, vi16, vi15, vi18 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
// vi2x8567 = ( vi27, vi26, vi25, vi28 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
// vi3x8567 = ( vi37, vi36, vi35, vi38 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
// vi4x8567 = ( vi47, vi46, vi45, vi48 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
// vi5x8567 = ( vi57, vi56, vi55, vi58 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
// vi6x8567 = ( vi67, vi66, vi65, vi68 )
const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vi6x89AB);
// vi7x8567 = ( vi77, vi76, vi75, vi78 )
const __m128 vi7x8567 = _mm_move_ss(vi7x4567, vi7x89AB);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi6x5678 = ( vi68, vi67, vi66, vi65 )
const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi7x5678 = ( vi78, vi77, vi76, vi75 )
const __m128 vi7x5678 = _mm_shuffle_ps(vi7x8567, vi7x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
__m128 vo5 = _mm_max_ps(vo5p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
vo5 = _mm_min_ps(vo5, vmax);
_mm_storeu_ps(o5, vo5);
o5 += 4;
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
vi6x4567 = _mm_and_ps(vmask, vi6x4567);
vi7x4567 = _mm_and_ps(vmask, vi7x4567);
// vi0x7456 = ( vi06, vi05, vi04, vi07 )
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi1x7456 = ( vi16, vi15, vi14, vi17 )
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi2x7456 = ( vi26, vi25, vi24, vi27 )
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi3x7456 = ( vi36, vi35, vi34, vi37 )
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi4x7456 = ( vi46, vi45, vi44, vi47 )
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi5x7456 = ( vi56, vi55, vi54, vi57 )
const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi6x7456 = ( vi66, vi65, vi64, vi67 )
const __m128 vi6x7456 = _mm_shuffle_ps(vi6x4567, vi6x4567, _MM_SHUFFLE(2, 1, 0, 3));
// vi7x7456 = ( vi76, vi75, vi74, vi77 )
const __m128 vi7x7456 = _mm_shuffle_ps(vi7x4567, vi7x4567, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
__m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
// vi0x3456 = ( vi06, vi05, vi04, vi03 )
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
// vi1x3456 = ( vi16, vi15, vi14, vi13 )
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
// vi2x3456 = ( vi26, vi25, vi24, vi23 )
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
// vi3x3456 = ( vi36, vi35, vi34, vi33 )
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
// vi4x3456 = ( vi46, vi45, vi44, vi43 )
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
// vi5x3456 = ( vi56, vi55, vi54, vi53 )
const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);
// vi6x3456 = ( vi66, vi65, vi64, vi63 )
const __m128 vi6x3456 = _mm_move_ss(vi6x7456, vi6x3012);
// vi7x3456 = ( vi76, vi75, vi74, vi73 )
const __m128 vi7x3456 = _mm_move_ss(vi7x7456, vi7x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
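// For the last block, the right neighbor of the final pixel comes from the implicit zero padding instead of a further load.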
const __m128 vzero = _mm_setzero_ps();
// vi0x8567 = ( vi07, vi06, vi05, 0.0 )
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
// vi1x8567 = ( vi17, vi16, vi15, 0.0 )
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
// vi2x8567 = ( vi27, vi26, vi25, 0.0 )
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
// vi3x8567 = ( vi37, vi36, vi35, 0.0 )
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
// vi4x8567 = ( vi47, vi46, vi45, 0.0 )
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
// vi5x8567 = ( vi57, vi56, vi55, 0.0 )
const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);
// vi6x8567 = ( vi67, vi66, vi65, 0.0 )
const __m128 vi6x8567 = _mm_move_ss(vi6x4567, vzero);
// vi7x8567 = ( vi77, vi76, vi75, 0.0 )
const __m128 vi7x8567 = _mm_move_ss(vi7x4567, vzero);
// vi0x5678 = ( vi08, vi07, vi06, vi05 )
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi1x5678 = ( vi18, vi17, vi16, vi15 )
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi2x5678 = ( vi28, vi27, vi26, vi25 )
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi3x5678 = ( vi38, vi37, vi36, vi35 )
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi4x5678 = ( vi48, vi47, vi46, vi45 )
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi5x5678 = ( vi58, vi57, vi56, vi55 )
const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi6x5678 = ( vi68, vi67, vi66, vi65 )
const __m128 vi6x5678 = _mm_shuffle_ps(vi6x8567, vi6x8567, _MM_SHUFFLE(0, 3, 2, 1));
// vi7x5678 = ( vi78, vi77, vi76, vi75 )
const __m128 vi7x5678 = _mm_shuffle_ps(vi7x8567, vi7x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
__m128 vo5 = _mm_max_ps(vo5p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
vo5 = _mm_min_ps(vo5, vmax);
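// Store 4 pixels per row when the tail is full, otherwise 2 and/or 1 depending on the remaining width.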
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o5, vo5);
o5 += 4;
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o5, vo5);
o5 += 2;
_mm_storel_pi((__m64*) o4, vo4);
o4 += 2;
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
vo4 = _mm_movehl_ps(vo4, vo4);
vo5 = _mm_movehl_ps(vo5, vo5);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o5, vo5);
o5 += 1;
_mm_store_ss(o4, vo4);
o4 += 1;
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-1x4-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
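// acc2 variant: two partial sums (vo0p0, vo0p1) shorten the dependency chain; they are combined once per block before clamping.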
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
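// _mm_alignr_epi8 splices the previous and current blocks, producing the left-shifted x3456 window in a single instruction.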
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-1x4-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
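// acc3 variant: three partial sums accumulate in parallel and are reduced at the end of the block.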
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-1x4-acc4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
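// acc4 variant: four partial sums (vo0p0..vo0p3) are reduced pairwise before clamping.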
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
__m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk21);
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
__m128 vo0p3 = _mm_mul_ps(vi0x3456, vk00);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x5678, vk02));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-1x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
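// Single-accumulator variant: the bias and all nine taps accumulate into vo0p0.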
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-2x4-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
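    // The zero-initialized x0123 block supplies the implicit left-padding column for the first
    // four outputs of each row.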
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
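      // Each output row accumulates into two partial sums (p0 and p1) that are added together at
      // the end, shortening the dependency chain of the multiply-adds.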
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
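      // x3456 (left neighbors): splice the last element of the previous block onto the current one.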
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
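      // x5678 (right neighbors): splice the first element of the next block onto the current one.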
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo1p0 = _mm_add_ps(vo1p0, vo1p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk11);
__m128 vo1p1 = _mm_mul_ps(vi2x4567, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk00));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi3x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk12));
vo1p1 = _mm_add_ps(vo1p1, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo1p0 = _mm_add_ps(vo1p0, vo1p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
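    // Advance by two rows: rewind i2/i3 to the start of their rows so they become the new i0/i1,
    // rotate the output pointers, and decrement output_height by 2 (doz saturates at zero).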
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,989 | 37.583691 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-2x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
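      // Single-accumulator variant: all nine taps for each output row chain into vo*p0.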
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 8,872 | 37.746725 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-3x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
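    // For a short final band of rows, out-of-range input rows read from the zero buffer and the
    // surplus output pointers alias the row above them; rows are stored bottom-up, so the valid
    // result is written last and overwrites the aliased ones.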
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi4x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x89AB), _mm_castps_si128(vi4x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi4x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11,436 | 39.846429 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-4x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi4x0123 = _mm_setzero_ps();
__m128 vi5x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
size_t w = input_width;
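    // The main loop covers whole 4-pixel blocks; the final 1..4 pixels of every row fall through
    // to the masked tail below.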
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x89AB), _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x89AB), _mm_castps_si128(vi5x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi5x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 14,000 | 41.299094 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-5x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
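  // Five output rows per pass need seven input rows: the row above, the five rows themselves, and
  // the row below.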
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi4x0123 = _mm_setzero_ps();
__m128 vi5x0123 = _mm_setzero_ps();
__m128 vi6x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vi6x4567 = _mm_loadu_ps(i6);
i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6x89AB = _mm_loadu_ps(i6);
i6 += 4;
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x89AB), _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x89AB), _mm_castps_si128(vi5x4567), 4));
const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x89AB), _mm_castps_si128(vi6x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
vi6x4567 = _mm_and_ps(vmask, vi6x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi5x4567), 4));
const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi6x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o4, vo4);
o4 += 2;
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
vo4 = _mm_movehl_ps(vo4, vo4);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o4, vo4);
o4 += 1;
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 16,564 | 42.363874 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-ssse3-6x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
__m128 vi0x0123 = _mm_setzero_ps();
__m128 vi1x0123 = _mm_setzero_ps();
__m128 vi2x0123 = _mm_setzero_ps();
__m128 vi3x0123 = _mm_setzero_ps();
__m128 vi4x0123 = _mm_setzero_ps();
__m128 vi5x0123 = _mm_setzero_ps();
__m128 vi6x0123 = _mm_setzero_ps();
__m128 vi7x0123 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
__m128 vi5x4567 = _mm_loadu_ps(i5);
i5 += 4;
__m128 vi6x4567 = _mm_loadu_ps(i6);
i6 += 4;
__m128 vi7x4567 = _mm_loadu_ps(i7);
i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5x89AB = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6x89AB = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7x89AB = _mm_loadu_ps(i7);
i7 += 4;
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
__m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
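      // Form the window shifted one pixel to the left (pixels 3..6): the last pixel of the previous block followed by the first three pixels of the current block.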
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
const __m128 vi7x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x4567), _mm_castps_si128(vi7x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
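      // Form the window shifted one pixel to the right (pixels 5..8): the last three pixels of the current block followed by the first pixel of the next block.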
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x89AB), _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x89AB), _mm_castps_si128(vi5x4567), 4));
const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x89AB), _mm_castps_si128(vi6x4567), 4));
const __m128 vi7x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x89AB), _mm_castps_si128(vi7x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
__m128 vo5 = _mm_max_ps(vo5p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
vo5 = _mm_min_ps(vo5, vmax);
_mm_storeu_ps(o5, vo5);
o5 += 4;
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
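      // Zero the lanes that lie beyond the right edge of the row so they do not contribute to the outputs.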
vi0x4567 = _mm_and_ps(vmask, vi0x4567);
vi1x4567 = _mm_and_ps(vmask, vi1x4567);
vi2x4567 = _mm_and_ps(vmask, vi2x4567);
vi3x4567 = _mm_and_ps(vmask, vi3x4567);
vi4x4567 = _mm_and_ps(vmask, vi4x4567);
vi5x4567 = _mm_and_ps(vmask, vi5x4567);
vi6x4567 = _mm_and_ps(vmask, vi6x4567);
vi7x4567 = _mm_and_ps(vmask, vi7x4567);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
__m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
__m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
__m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
__m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
__m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));
const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
const __m128 vi7x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x4567), _mm_castps_si128(vi7x0123), 12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));
const __m128i vzero = _mm_setzero_si128();
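      // Past the right edge the neighboring pixel is zero, so zeroes are shifted in instead of a next block.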
const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi4x4567), 4));
const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi5x4567), 4));
const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi6x4567), 4));
const __m128 vi7x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi7x4567), 4));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
__m128 vo1 = _mm_max_ps(vo1p0, vmin);
__m128 vo2 = _mm_max_ps(vo2p0, vmin);
__m128 vo3 = _mm_max_ps(vo3p0, vmin);
__m128 vo4 = _mm_max_ps(vo4p0, vmin);
__m128 vo5 = _mm_max_ps(vo5p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
vo1 = _mm_min_ps(vo1, vmax);
vo2 = _mm_min_ps(vo2, vmax);
vo3 = _mm_min_ps(vo3, vmax);
vo4 = _mm_min_ps(vo4, vmax);
vo5 = _mm_min_ps(vo5, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
_mm_storeu_ps(o5, vo5);
o5 += 4;
_mm_storeu_ps(o4, vo4);
o4 += 4;
_mm_storeu_ps(o3, vo3);
o3 += 4;
_mm_storeu_ps(o2, vo2);
o2 += 4;
_mm_storeu_ps(o1, vo1);
o1 += 4;
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
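        // Store the remaining 1..3 pixels of each output row: a pair first (if present), then a single pixel.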
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o5, vo5);
o5 += 2;
_mm_storel_pi((__m64*) o4, vo4);
o4 += 2;
_mm_storel_pi((__m64*) o3, vo3);
o3 += 2;
_mm_storel_pi((__m64*) o2, vo2);
o2 += 2;
_mm_storel_pi((__m64*) o1, vo1);
o1 += 2;
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
vo1 = _mm_movehl_ps(vo1, vo1);
vo2 = _mm_movehl_ps(vo2, vo2);
vo3 = _mm_movehl_ps(vo3, vo3);
vo4 = _mm_movehl_ps(vo4, vo4);
vo5 = _mm_movehl_ps(vo5, vo5);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o5, vo5);
o5 += 1;
_mm_store_ss(o4, vo4);
o4 += 1;
_mm_store_ss(o3, vo3);
o3 += 1;
_mm_store_ss(o2, vo2);
o2 += 1;
_mm_store_ss(o1, vo1);
o1 += 1;
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 19,128 | 43.177829 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
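  // weights[0] is the bias and weights[1..9] are the 3x3 kernel taps; each is broadcast to a full vector below.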
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
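      // vo0p0 and vo0p1 accumulate independently and are summed just before clamping (the "acc2" scheme), shortening the dependency chain.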
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,540 | 35.138122 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
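      // Three independent accumulators (vo0p0..vo0p2); they are reduced to a single sum before the min/max clamp.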
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,596 | 35.04918 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
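      // vo0p3 is the fourth accumulator of the "acc4" variant; the partial sums are combined pairwise before clamping.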
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,652 | 34.962162 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
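    // Advance to the next output row: the input window slides down by one row, and the over-advanced pointers are pulled back to the start of their rows.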
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,479 | 35.201117 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
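  // This variant computes two output rows per pass, each row using two accumulators.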
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 9,272 | 38.292373 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
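    // For a short tail, out-of-range input rows read from the zero buffer and the unused output row aliases the row above it.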
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 9,155 | 38.465517 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
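  // Three output rows per pass, reading from five input rows (i0..i4).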
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
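      // Form the left-shifted windows (columns 3..6) from the previous and current blocks; they feed the first kernel column (vk00/vk10/vk20).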
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
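      // Form the right-shifted windows (columns 5..8) from the current and next blocks; they feed the last kernel column (vk02/vk12/vk22).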
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
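      // Zero the lanes past the end of the row so that over-read values do not contribute to the results.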
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
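    // The row pointers have advanced by the rounded-up row length; rebase them three input rows down for the next group of output rows.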
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11,777 | 40.618375 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-4x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
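  // Each pass of the outer loop reads 6 input rows (i0..i5) and writes 4 output rows (o0..o3).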
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 14,399 | 42.113772 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-5x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
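  // Each pass of the outer loop reads 7 input rows (i0..i6) and writes 5 output rows (o0..o4).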
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6);
i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6);
i6 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 17,021 | 43.212987 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-loadsplat-6x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
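  // Each pass of the outer loop reads 8 input rows (i0..i7) and writes 6 output rows (o0..o5).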
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6);
i6 += 4;
v128_t vi7x4567 = wasm_v128_load(i7);
i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7x89AB = wasm_v128_load(i7);
i7 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
vo5 = wasm_f32x4_min(vo5, vmax);
wasm_v128_store(o5, vo5);
o5 += 4;
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vi7x4567 = wasm_v128_and(vmask, vi7x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
vo5 = wasm_f32x4_min(vo5, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o5, vo5);
o5 += 4;
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o5, vo5, 0);
o5 += 2;
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
vo5 = wasm_v64x2_shuffle(vo5, vo5, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o5, vo5, 0);
o5 += 1;
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 19,643 | 44.055046 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
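  // The splat variant keeps the weights packed in vw0123/vw4567/vw89 and broadcasts each tap with a shuffle at the point of use, instead of pre-splatting all ten values into separate registers.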
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
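      // acc2: accumulate into two partial sums (vo0p0 and vo0p1), which are added together before clamping.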
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
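      // At the right edge the missing right neighbors are implicit zeroes.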
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,672 | 36.488764 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
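    // Slide the 3-row input window down one output row: i1/i2 become the new i0/i1;
    // input_decrement undoes the per-row pointer advance (row width rounded up to a whole 4-float block).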
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,728 | 36.383333 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
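      // Tree-reduce the four accumulators: (p0 + p1) + (p2 + p3).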
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,784 | 36.28022 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
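      // Store the 1..4 remaining outputs: a full vector, else a 2-lane store (shifting the
      // upper half down afterwards) followed by an optional 1-lane store.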
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,611 | 36.568182 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
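  // Two output rows are written per pass; the second row starts input_width bytes later
  // (output rows have the same width as input rows for a 3x3 kernel with unit padding).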
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
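      // o1 aliases o0: row 1 is stored first and then overwritten by the valid row 0 results.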
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
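    // Slide the 4-row input window down two output rows: the old i2/i3 become the new i0/i1.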
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
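    // doz() is a saturating difference-or-zero, so a trailing single row does not wrap the counter.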
} while (output_height != 0);
}
| 10,244 | 43.350649 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 10,127 | 43.61674 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
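  // Three output rows per pass, fed by five input rows (i0..i4).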
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 13,589 | 48.23913 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
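    // When fewer output rows remain than this kernel produces per pass, the surplus input
    // rows are redirected to the shared zero row and the surplus output pointers alias the
    // last valid output row.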
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
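    // Rewind the row pointers for the next group of output rows; input_decrement undoes the advance accumulated across the row.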
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 17,051 | 51.467692 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-5x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
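  // The 10 packed weights (bias followed by the 3x3 taps) are kept in three vectors and splatted per lane below.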
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
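      // Columns 3..6: last column of the previous block followed by the current block, feeding the left-column taps.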
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
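      // Columns 5..8: current block shifted by one with the next block's first column, feeding the right-column taps.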
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
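      // Zero the lanes past the end of the row so the partial block contributes nothing out of bounds.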
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 20,513 | 53.850267 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-arm-splat-6x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
vo5 = wasm_f32x4_min(vo5, vmax);
wasm_v128_store(o5, vo5); o5 += 4;
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vi7x4567 = wasm_v128_and(vmask, vi7x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
v128_t vo4 = wasm_f32x4_max(vo4p0, vmin);
v128_t vo5 = wasm_f32x4_max(vo5p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
vo1 = wasm_f32x4_min(vo1, vmax);
vo2 = wasm_f32x4_min(vo2, vmax);
vo3 = wasm_f32x4_min(vo3, vmax);
vo4 = wasm_f32x4_min(vo4, vmax);
vo5 = wasm_f32x4_min(vo5, vmax);
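      // Store the tail: a full block when 4 columns remain, otherwise 2- and 1-column pieces selected by the width bits.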
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o5, vo5); o5 += 4;
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o5, vo5, 0);
o5 += 2;
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
vo5 = wasm_v64x2_shuffle(vo5, vo5, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o5, vo5, 0);
o5 += 1;
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 23,975 | 55.680851 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
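  // "loadsplat" variant: each tap is splatted into a full vector once, outside the pixel loop.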
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
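      // Merge the two accumulators (acc2 variant) before clamping.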
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,544 | 35.160221 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
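      // Window shifted one pixel to the left (columns 3..6): the last lane of the previous block joined with the first three lanes of the current block.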
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
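      // Window shifted one pixel to the right (columns 5..8): the current block joined with the first lane of the next block.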
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
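      // Keep only the lanes that belong to this row; vmask zeroes any trailing lanes read past the end (the kernel is declared XNN_OOB_READS).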
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
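    // Rewind the row pointers by the rounded-up row length and rotate them down one row for the next output row.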
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,600 | 35.071038 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, vk21);
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, vk00);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk02));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,656 | 34.983784 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,483 | 35.223464 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
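    // Two output rows are computed per iteration; when fewer rows remain, the extra input rows read from the zero buffer and the extra output row aliases o0.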
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, vk11);
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, vk11);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk00));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, vk12));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 9,280 | 38.326271 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 9,163 | 38.5 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
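    // doz() is a saturating difference-or-zero subtraction, so output_height cannot wrap below zero.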
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11,789 | 40.660777 | 86 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
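    // Rotate the input rows for the next band of 4 output rows: the rows that fed the bottom of this band become its top two rows; input_decrement rewinds the rounded-up advance accumulated while streaming each row.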
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 14,415 | 42.161677 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-5x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
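    // The zeroed x0123 vectors stand in for the columns left of the image; only their last lane reaches the x3456 shuffle below, providing the single column of implicit left padding.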
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6);
i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6);
i6 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
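      // Shift the window one pixel to the left: lane 3 of the previous block is joined with lanes 0..2 of the current block.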
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
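      // Shift the window one pixel to the right by pulling in the first pixel of the next block.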
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 17,041 | 43.264935 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-loadsplat-6x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
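    // When fewer than 6 output rows remain, surplus input rows are redirected to the zero buffer and surplus output pointers alias the row above; their stores are issued first and overwritten by the valid rows.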
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3);
i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4);
i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5);
i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6);
i6 += 4;
v128_t vi7x4567 = wasm_v128_load(i7);
i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
const v128_t vi0x89AB = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7x89AB = wasm_v128_load(i7);
i7 += 4;
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
vo5 = wasm_f32x4_pmin(vmax, vo5);
wasm_v128_store(o5, vo5);
o5 += 4;
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vi7x4567 = wasm_v128_and(vmask, vi7x4567);
v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
v128_t vo5p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi5x4567, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, vk21));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, vk20));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
vo5 = wasm_f32x4_pmin(vmax, vo5);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o5, vo5);
o5 += 4;
wasm_v128_store(o4, vo4);
o4 += 4;
wasm_v128_store(o3, vo3);
o3 += 4;
wasm_v128_store(o2, vo2);
o2 += 4;
wasm_v128_store(o1, vo1);
o1 += 4;
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o5, vo5, 0);
o5 += 2;
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
vo5 = wasm_v64x2_shuffle(vo5, vo5, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o5, vo5, 0);
o5 += 1;
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 19,667 | 44.110092 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
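  // weights holds the bias followed by the 9 kernel taps; this splat variant re-broadcasts individual taps with shuffles at each use instead of pre-splatting them into separate registers.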
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
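      // Fold the second accumulator into the first: the _acc2 variant splits the additions across two chains to shorten the dependency depth.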
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,676 | 36.511236 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,732 | 36.405556 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
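      // The _acc4 variant reduces its four partial accumulators pairwise before the final clamp.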
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p2 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
v128_t vo0p3 = wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,788 | 36.302198 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
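      // Zero out any lanes loaded past the end of the row so they do not contribute to the dot products.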
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
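      // Columns past the right edge are explicit zeros (right padding of 1).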
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
} while (--output_height != 0);
}
| 6,615 | 36.590909 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
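      // acc2 variant: a second accumulator pair (vo0p1/vo1p1) splits the dependency chain; the pairs are summed back into vo0p0/vo1p0 before clamping.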
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo1p1 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
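    // Two output rows were produced: step the input rows down by two and re-aim both output pointers; doz() saturates the remaining height at zero.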
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 10,252 | 43.385281 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
}
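    // With a single row left, the surplus input rows alias the zero page and o1 aliases o0; o1 is written first, so the valid o0 row overwrites it.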
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 10,135 | 43.651982 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-3x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
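  // 3x4 tile: each pass consumes 5 input rows (i0..i4) and produces 3 output rows (o0..o2), 4 pixels wide per iteration.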
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i3 - input_decrement);
i1 = (const float*) ((uintptr_t) i4 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 13,601 | 48.282609 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_4x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
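  // 4x4 tile: each pass consumes 6 input rows (i0..i5) and produces 4 output rows (o0..o3).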
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i4 - input_decrement);
i1 = (const float*) ((uintptr_t) i5 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o3;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
output_height = doz(output_height, 4);
} while (output_height != 0);
}
| 17,067 | 51.516923 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-5x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_5x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
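  // 5x4 tile: each pass consumes 7 input rows (i0..i6) and produces 5 output rows (o0..o4).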
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
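      // Shift the window one column to the right (x5 x6 x7 x8): lanes 1-3 of the
      // current block plus the first lane of the next block. These feed the
      // right-column taps (k02, k12, k22).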
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
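      // Zero the lanes that fall beyond the right edge of the row so they do not
      // contribute to the partial output block.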
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
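      // There is no block to the right of the last one: shift in zeroes instead.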
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
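      // Store 4, 2 and/or 1 output pixels per row, matching the remainder width.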
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
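    // Rebase the row pointers for the next group of output rows: rewind by the
    // amount the inner loop advanced, then step down by the rows just produced.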
i0 = (const float*) ((uintptr_t) i5 - input_decrement);
i1 = (const float*) ((uintptr_t) i6 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o4;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
output_height = doz(output_height, 5);
} while (output_height != 0);
}
| 20,533 | 53.903743 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3p1-minmax-wasmsimd-x86-splat-6x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 1);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = input;
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
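  // Each iteration produces 6 output rows (o0..o5) from 8 consecutive input rows
  // (i0..i7); i0 starts at the zero row that implements padding_top == 1.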
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
float* o3 = (float*) ((uintptr_t) o2 + input_width);
float* o4 = (float*) ((uintptr_t) o3 + input_width);
float* o5 = (float*) ((uintptr_t) o4 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i2 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i3 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i4 = zero;
o3 = o2;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i5 = zero;
o4 = o3;
}
if XNN_UNPREDICTABLE(output_height < 6) {
i6 = zero;
o5 = o4;
}
if XNN_UNPREDICTABLE(output_height < 7) {
i7 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;
size_t w = input_width;
for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;
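      // Accumulate the center-column taps (k01, k11, k21) for each of the six output rows.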
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vi0x0123 = vi0x4567;
vi1x0123 = vi1x4567;
vi2x0123 = vi2x4567;
vi3x0123 = vi3x4567;
vi4x0123 = vi4x4567;
vi5x0123 = vi5x4567;
vi6x0123 = vi6x4567;
vi7x0123 = vi7x4567;
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vi0x4567 = vi0x89AB;
vi1x4567 = vi1x89AB;
vi2x4567 = vi2x89AB;
vi3x4567 = vi3x89AB;
vi4x4567 = vi4x89AB;
vi5x4567 = vi5x89AB;
vi6x4567 = vi6x89AB;
vi7x4567 = vi7x89AB;
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
vo5 = wasm_f32x4_pmin(vmax, vo5);
wasm_v128_store(o5, vo5); o5 += 4;
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 1..4 pixels.
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vi5x4567 = wasm_v128_and(vmask, vi5x4567);
vi6x4567 = wasm_v128_and(vmask, vi6x4567);
vi7x4567 = wasm_v128_and(vmask, vi7x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
vo1 = wasm_f32x4_pmin(vmax, vo1);
vo2 = wasm_f32x4_pmin(vmax, vo2);
vo3 = wasm_f32x4_pmin(vmax, vo3);
vo4 = wasm_f32x4_pmin(vmax, vo4);
vo5 = wasm_f32x4_pmin(vmax, vo5);
if XNN_LIKELY(w == 4 * sizeof(float)) {
wasm_v128_store(o5, vo5); o5 += 4;
wasm_v128_store(o4, vo4); o4 += 4;
wasm_v128_store(o3, vo3); o3 += 4;
wasm_v128_store(o2, vo2); o2 += 4;
wasm_v128_store(o1, vo1); o1 += 4;
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o5, vo5, 0);
o5 += 2;
wasm_v128_store64_lane(o4, vo4, 0);
o4 += 2;
wasm_v128_store64_lane(o3, vo3, 0);
o3 += 2;
wasm_v128_store64_lane(o2, vo2, 0);
o2 += 2;
wasm_v128_store64_lane(o1, vo1, 0);
o1 += 2;
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
vo1 = wasm_v64x2_shuffle(vo1, vo1, 1, 1);
vo2 = wasm_v64x2_shuffle(vo2, vo2, 1, 1);
vo3 = wasm_v64x2_shuffle(vo3, vo3, 1, 1);
vo4 = wasm_v64x2_shuffle(vo4, vo4, 1, 1);
vo5 = wasm_v64x2_shuffle(vo5, vo5, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o5, vo5, 0);
o5 += 1;
wasm_v128_store32_lane(o4, vo4, 0);
o4 += 1;
wasm_v128_store32_lane(o3, vo3, 0);
o3 += 1;
wasm_v128_store32_lane(o2, vo2, 0);
o2 += 1;
wasm_v128_store32_lane(o1, vo1, 0);
o1 += 1;
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i6 - input_decrement);
i1 = (const float*) ((uintptr_t) i7 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
i7 = (const float*) ((uintptr_t) i6 + input_width);
o0 = o5;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
o3 = (float*) ((uintptr_t) o2 + input_width);
o4 = (float*) ((uintptr_t) o3 + input_width);
o5 = (float*) ((uintptr_t) o4 + input_width);
output_height = doz(output_height, 6);
} while (output_height != 0);
}
| 23,999 | 55.737589 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-aarch64-neonfma-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__aarch64_neonfma_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
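  // With padding_top == 1, i0 notionally starts one row above the input (so that i1
  // lands on the first input row) and is then redirected to the zero row below;
  // with padding_top == 0, i0 starts at the first input row.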
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
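      // vld2q_f32 de-interleaves 8 consecutive input pixels: val[0] holds the even
      // columns (x8 xA xC xE) that align with the stride-2 output grid, val[1] the
      // odd columns (x9 xB xD xF).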
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vw89, 0);
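      // vextq_f32 prepends the last odd column of the previous block to form the
      // left neighbours (x7 x9 xB xD) of the even output columns.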
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
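      // Mask off the columns beyond the right edge of the row, separately for the
      // even and odd de-interleaved halves.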
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
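      // The remainder holds 1-7 input pixels and yields ceil(pixels / 2) outputs;
      // adding one float to w lets the bit tests below store exactly that many.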
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,457 | 35.902857 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-aarch64-neonfma-1x4-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__aarch64_neonfma_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
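      // The nine tap products are spread over three accumulators (vo0p0..vo0p2) to
      // shorten the FMA dependency chain; they are summed before clamping.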
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,545 | 35.983051 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-aarch64-neonfma-1x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__aarch64_neonfma_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x8ACE9BDF.val[1], vw89, 1);
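      // Reduce the four partial accumulators pairwise before clamping.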
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw4567), 1);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,633 | 36.061453 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-aarch64-neonfma-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__aarch64_neonfma_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
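  // Standard convolution output size: floor((padded_height - kernel) / stride) + 1,
  // written here as (padded_input_height - 3 + 2) / 2.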
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x9BDF, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,364 | 35.791908 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-neon-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride2.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,446 | 35.84 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-neon-1x4-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride2.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,534 | 35.920904 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-neon-1x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride2.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x8ACE9BDF.val[1], vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 0);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw4567), 1);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x9BDF, vw89, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,622 | 36 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-neon-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride2.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x2_t vw89 = vld1_f32(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE, vw89, 0);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x79BD, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x9BDF, vw89, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (4 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (2 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,353 | 35.728324 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-scalar-1x1-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
size_t w = input_width;
for (; w >= 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x1 = i0[0];
const float vi1x1 = i1[0];
const float vi2x1 = i2[0];
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
const float vi0x2 = i0[1];
i0 += 2;
const float vi1x2 = i1[1];
i1 += 2;
const float vi2x2 = i2[1];
i2 += 2;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Potentially process the last pixel.
assert(w <= 1 * sizeof(float));
if (w != 0) {
const float vi0x1 = *i0++;
const float vi1x1 = *i1++;
const float vi2x1 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 3,453 | 24.211679 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-scalar-1x1-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
size_t w = input_width;
for (; w >= 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x1 = i0[0];
const float vi1x1 = i1[0];
const float vi2x1 = i2[0];
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
const float vi0x2 = i0[1];
i0 += 2;
const float vi1x2 = i1[1];
i1 += 2;
const float vi2x2 = i2[1];
i2 += 2;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Potentially process the last pixel.
assert(w <= 1 * sizeof(float));
if (w != 0) {
const float vi0x1 = *i0++;
const float vi1x1 = *i1++;
const float vi2x1 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 3,507 | 24.23741 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-scalar-1x1-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
size_t w = input_width;
for (; w >= 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x1 = i0[0];
const float vi1x1 = i1[0];
const float vi2x1 = i2[0];
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
const float vi0x2 = i0[1];
i0 += 2;
const float vi1x2 = i1[1];
i1 += 2;
const float vi2x2 = i2[1];
i2 += 2;
float vo0p3 = vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Potentially process the last pixel.
assert(w <= 1 * sizeof(float));
if (w != 0) {
const float vi0x1 = *i0++;
const float vi1x1 = *i1++;
const float vi2x1 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 3,561 | 24.262411 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-scalar-1x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__scalar_1x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk10 = weights[4];
const float vk11 = weights[5];
const float vk12 = weights[6];
const float vk20 = weights[7];
const float vk21 = weights[8];
const float vk22 = weights[9];
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
size_t w = input_width;
for (; w >= 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x1 = i0[0];
const float vi1x1 = i1[0];
const float vi2x1 = i2[0];
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
const float vi0x2 = i0[1];
i0 += 2;
const float vi1x2 = i1[1];
i1 += 2;
const float vi2x2 = i2[1];
i2 += 2;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
// Potentially process the last pixel.
assert(w <= 1 * sizeof(float));
if (w != 0) {
const float vi0x1 = *i0++;
const float vi1x1 = *i1++;
const float vi2x1 = *i2++;
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 3,394 | 24.148148 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-sse-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
__m128 vi0x7531 = _mm_setzero_ps();
__m128 vi1x7531 = _mm_setzero_ps();
__m128 vi2x7531 = _mm_setzero_ps();
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,700 | 37.893939 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-sse-1x4-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
__m128 vi0x7531 = _mm_setzero_ps();
__m128 vi1x7531 = _mm_setzero_ps();
__m128 vi2x7531 = _mm_setzero_ps();
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk12));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,756 | 37.785 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-sse-1x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
__m128 vi0x7531 = _mm_setzero_ps();
__m128 vi1x7531 = _mm_setzero_ps();
__m128 vi2x7531 = _mm_setzero_ps();
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p3 = _mm_mul_ps(vi0x9BDF, vk02);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x79BD, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk11);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk21);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
__m128 vo0p3 = _mm_mul_ps(vi0x9BDF, vk02);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x79BD, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
w += 1 * sizeof(float);
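        // After this increment, floor(w / (2 * sizeof(float))) equals the number of outputs
        // left (1..3); the bit tests below split it into a 2-element and a 1-element store.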
if (w & (4 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
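    // The row pointers advanced only through full 8-float blocks (input_decrement bytes in
    // total), so i2 - input_decrement is the start of the row i2 was scanning; with stride 2
    // that row becomes the top row (i0) of the next output row.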
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,812 | 37.678218 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-sse-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
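  // The even/odd masks zero out deinterleaved column lanes that lie past the end of the row;
  // they are applied only in the remainder block below.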
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk10 = _mm_load1_ps(weights + 4);
const __m128 vk11 = _mm_load1_ps(weights + 5);
const __m128 vk12 = _mm_load1_ps(weights + 6);
const __m128 vk20 = _mm_load1_ps(weights + 7);
const __m128 vk21 = _mm_load1_ps(weights + 8);
const __m128 vk22 = _mm_load1_ps(weights + 9);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
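  // When padding_top == 1, i0 initially points one row above the input and is then redirected
  // to the zero row; either way i1 = i0 + input_width is the middle tap row of the first
  // output row.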
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
__m128 vi0x7531 = _mm_setzero_ps();
__m128 vi1x7531 = _mm_setzero_ps();
__m128 vi2x7531 = _mm_setzero_ps();
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Potentially process the last block of 0..7 pixels.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi1x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi1x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk21));
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk22));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7531);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7531);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7531);
vi0x7531 = vi0xF9BD;
vi1x7531 = vi1xF9BD;
vi2x7531 = vi2xF9BD;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk20));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if (w == 7 * sizeof(float)) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
w += 1 * sizeof(float);
if (w & (4 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (2 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,639 | 37.979592 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
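  // vw89 holds weights[8] and weights[9] (k21 and k22) duplicated into both 64-bit halves.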
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
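      // Each x79BD vector takes lane 3 of the previous block's odd columns (column 7, or zero
      // at the left edge) followed by the first three odd columns of this block (9, B, D).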
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,793 | 37.776119 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,849 | 37.669951 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,905 | 37.565854 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,732 | 37.859296 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
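  // In this "splat" variant the ten weights stay packed in vw0123/vw4567/vw89, and each one
  // is broadcast with a lane shuffle at its point of use instead of being pre-splatted.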
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,914 | 40.439791 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,970 | 40.300518 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
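      // The bit tests below store only the outputs that are valid for this partial
      // block: 4, 2 and/or 1 values depending on the remaining width.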
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
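    // Advance two input rows per output row (stride 2): the new i0 is the start of
    // the row that i2 was just reading.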
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 8,026 | 40.164103 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
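      // Zero out columns past the end of the row so that out-of-bounds lanes
      // contribute nothing to the accumulator.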
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,853 | 40.555556 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
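  // The loadsplat variant pre-broadcasts the bias and all nine 3x3 taps into
  // dedicated vector registers instead of splatting them inside the loop.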
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
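      // Clamp with pseudo-min/max, which lower to single minps/maxps instructions on
      // x86 (unlike the wasm_f32x4_min/max used in the arm-tuned variants).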
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,797 | 37.79602 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
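    // On the last output row the bottom tap row may lie below the input, so it is
    // read from the caller-provided zero row instead.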
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,853 | 37.689655 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
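      // acc4 variant: the nine products are spread over four independent
      // accumulators (vo0p0..vo0p3) to shorten dependency chains, then reduced.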
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,909 | 37.585366 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
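  // A 3-tap kernel with stride 2 over the padded height yields
  // (padded_input_height - 1) / 2 output rows.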
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,736 | 37.879397 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
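  // The splat variant keeps the weights packed in vw0123/vw4567/vw89 and broadcasts
  // each tap with a shuffle at its point of use inside the loop.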
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,918 | 40.460733 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
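  // The ((-padding_top) & input_width) offset above positions i0 so that i1 lands on
  // the first input row when padding_top == 1; i0 itself is then redirected to the
  // zero row below. With padding_top == 0, i0 simply starts on the first input row.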
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,974 | 40.321244 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
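      // Combine the previous block's last odd pixel with the current odd pixels to form the left neighbors (x7, x9, xB, xD) of the even-indexed taps.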
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
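      // Zero the lanes read past the end of the row (the kernel is declared XNN_OOB_READS) before deinterleaving into even/odd pixels.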
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 8,030 | 40.184615 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 0);
assert(padding_top <= 1);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
if XNN_UNPREDICTABLE(padding_top != 0) {
i0 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 4) {
i2 = zero;
}
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
size_t w = input_width;
for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 0-7 pixels to process.
assert(w < 8 * sizeof(float));
if XNN_LIKELY(w != 0) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
w += 1 * sizeof(float);
if (w & (8 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (4 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (2 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i0 + input_width);
i2 = (const float*) ((uintptr_t) i1 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 7,857 | 40.57672 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
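      // vextq_f32 splices adjacent blocks to form the column-shifted windows (x3456, x2345, x5678, x6789) required by the 5-wide kernel.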
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
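      // Mask off lanes loaded beyond the end of the row so the out-of-bounds reads do not contribute to the output.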
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
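      // Past the last valid column the shifted windows are padded with zeros instead of loading another block.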
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
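    // Slide the row pointers down by one input row for the next output row; input_decrement undoes the in-row advance accumulated above.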
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,684 | 37.824257 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-1x4-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,816 | 37.862408 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-1x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,948 | 37.9 | 90 |
c
|