repo (string, lengths 1–152, ⌀) | file (string, lengths 14–221) | code (string, lengths 501–25k) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
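// This micro-kernel computes one output row at a time of a depthwise 5x5
// convolution (stride 1, padding 2) over f32 data in CHW layout.
// weights[0] holds the bias; weights[1 + 5*r + c] holds the kernel tap at
// row r, column c (r, c in 0..4). Four output pixels are produced per
// iteration, and the "acc5" suffix means five partial accumulators are
// kept per output vector to shorten the FMA dependency chain.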
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
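// Every row pointer advances by a whole number of 4-float blocks while its
// row is processed; input_decrement is exactly that distance, so subtracting
// it from a pointer returns it to the start of the row it just walked.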
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
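// Main loop: while more than 8 pixels remain, produce 4 output pixels per
// iteration. The 25 taps are spread across five partial sums (vo0p0..vo0p4)
// that are reduced with a short vaddq_f32 tree just before the store.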
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
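// The trailing 4 input pixels are loaded as a full vector and ANDed with
// vmask (from params->neon_stride1.mask), which keeps the lanes inside the
// row and zeroes the lanes past its last valid pixel.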
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
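// Final block of 1..4 pixels: the previously loaded vectors are masked so
// out-of-row lanes contribute zeros, and the store is split on the bits of
// w (4, then 2, then 1 floats) so only valid output pixels are written.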
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
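// Slide the 5-row window down one output row: input_decrement rewinds i1
// and i2 to the start of the rows they just walked (becoming the new i0 and
// i1), and the remaining pointers are rebuilt one row apart below them.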
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
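// A minimal calling sketch, with hypothetical sizes and buffers; in real use
// XNNPACK's operator code sets everything up, and xnn_f32_chw_params must be
// initialized by the library's own init routine so mask/min/max are valid:
//
//   float in[5 * 8 + 4];             // 5 rows x 8 pixels, +4 floats of slack
//   float zero[8 + 4] = {0};         // zero row for the top padding rows
//   float out[5 * 8];                // one output row per input row
//   float weights[1 + 25];           // bias followed by the 5x5 taps
//   union xnn_f32_chw_params params; // init omitted (library-specific)
//   xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4_acc5(
//       5, 8 * sizeof(float), in, weights, zero, out,
//       /*padding_top=*/2, &params);
//
// The +4 floats of slack exist because XNN_OOB_READS allows the kernel to
// read (but never use) up to one vector past the end of a row.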
| 16,080 | 37.937046 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
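// Variant of the previous kernel with a single accumulator chain: every tap
// is folded into vo0p0 with back-to-back FMAs instead of being spread over
// several partial sums, using fewer registers at the cost of a longer
// dependency chain.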
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,547 | 37.773067 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
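// 2x4 variant: two output rows (o0, o1) are computed per pass from six input
// rows (their two 5-row windows overlap on four rows), with two partial
// accumulators per output row ("acc2") that are summed before the clamp and
// store.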
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
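// Advance two output rows at once: i2 and i3 are rewound to become the new
// i0 and i1, the remaining input pointers are rebuilt one row apart, and
// both output pointers move down past the two rows just written.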
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
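// doz() is "difference or zero": output_height decreases by 2 per pass but
// saturates at 0 when the image has an odd number of rows.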
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 23,574 | 43.481132 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-2x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
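// "acc3" variant of the 2x4 kernel above: three partial accumulators per
// output row (vo*p0..vo*p2), reduced pairwise before the clamp and store.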
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_2x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
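      // The trailing 89AB tile covers only 1..4 valid pixels; zero the lanes past the row end before they enter the taps.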
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
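    // Final block of 1..4 pixels: the cached x4567 tiles may extend past the row, so mask their out-of-bounds columns first.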
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vfmaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vfmaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vfmaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vfmaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
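    // Slide the six-row input window down by two output rows; input_decrement rewinds i2/i3 to the start of the rows they just consumed.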
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-aarch64-neonfma-2x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
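    // 2x4 variant: each pass computes two output rows (vo0, vo1) from six shared input rows i0..i5, four pixels at a time.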
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vfmaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vfmaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
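
// A minimal sketch of driving the 2x4 ukernel above directly, outside the
// XNNPACK operator API. The xnn_f32_chw_params fields (neon_stride1.mask,
// .min, .max), the bias-first 26-float weight layout, and the byte-valued
// input_width are read off the kernel body; the wrapper name, the mask
// construction, and the zero-row padding are illustrative assumptions --
// real callers should rely on XNNPACK's params-init helpers and
// XNN_EXTRA_BYTES padding instead.
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack/dwconv.h>

static void run_5x5p2_neonfma_2x4(
    size_t height, size_t width_px,
    const float* src,          // height * width_px floats (plus read slack)
    const float weights[26],   // weights[0] = bias, weights[1..25] = taps
    float* dst,                // height * width_px floats
    float out_min, float out_max)
{
  const size_t width_bytes = width_px * sizeof(float);  // kernel takes bytes

  // Zero row consumed in place of out-of-bounds input rows, padded by one
  // extra vector because the kernel is declared XNN_OOB_READS (assumed slack).
  float* zero = (float*) calloc(width_px + 4, sizeof(float));

  union xnn_f32_chw_params params;
  // Assumed mask semantics: keep the 1..4 lanes of the final tile that are
  // still inside the row, zero the rest.
  const size_t valid = ((width_px - 1) & 3) + 1;
  for (size_t i = 0; i < 4; i++) {
    params.neon_stride1.mask[i] = i < valid ? UINT32_C(0xFFFFFFFF) : 0;
  }
  params.neon_stride1.min = out_min;
  params.neon_stride1.max = out_max;

  xnn_f32_dwconv2d_chw_ukernel_5x5p2__aarch64_neonfma_2x4(
      height, width_bytes, src, weights, zero, dst,
      /*padding_top=*/2, &params);
  free(zero);
}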
// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-1x4-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
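    // Main loop: 4 output pixels per iteration from a sliding 12-pixel window (x0123|x4567|x89AB); the acc2 variant alternates two accumulators (vo0p0, vo0p1) to shorten the vmla dependency chain.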
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
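
// Plain-C reference of the computation this kernel family implements: one
// CHW channel, 5x5 depthwise convolution, stride 1, padding 2 on every
// side, bias in weights[0] and taps in weights[1 + 5*ky + kx], followed by
// the min/max clamp. A sketch for spot-checking vectorized output; the
// weight layout and implicit zero padding are read off the kernel bodies
// above, not taken from an official reference implementation.
#include <stddef.h>

static void dwconv2d_chw_5x5p2_ref(
    size_t height, size_t width,
    const float* input,        // height * width floats
    const float weights[26],   // weights[0] = bias, weights[1..25] = taps
    float* output,             // height * width floats
    float out_min, float out_max)
{
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float acc = weights[0];  // bias
      for (size_t ky = 0; ky < 5; ky++) {
        for (size_t kx = 0; kx < 5; kx++) {
          // Input coordinates with padding 2; out-of-bounds taps read an
          // implicit zero, matching the kernels' zero row and column masks.
          const ptrdiff_t iy = (ptrdiff_t) (oy + ky) - 2;
          const ptrdiff_t ix = (ptrdiff_t) (ox + kx) - 2;
          if (iy >= 0 && iy < (ptrdiff_t) height && ix >= 0 && ix < (ptrdiff_t) width) {
            acc += input[(size_t) iy * width + (size_t) ix] * weights[1 + 5 * ky + kx];
          }
        }
      }
      acc = acc < out_min ? out_min : acc;  // vmaxq_f32(_, vmin)
      acc = acc > out_max ? out_max : acc;  // vminq_f32(_, vmax)
      output[oy * width + ox] = acc;
    }
  }
}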
// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-1x4-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon_stride1.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
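    // acc3 variant: the bias and 25 taps are spread across three accumulators (vo0p0..vo0p2), summed just before the clamp.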
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
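    // Final 1..4 output pixels: the (possibly partial) last input block is
    // already held in the vi*x4567 registers and only needs to be masked.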
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
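      // Store the remaining 1..4 outputs: a full vector when 4 are left,
      // otherwise the low pair and/or a single lane.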
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
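    // Slide the five-row input window down by one output row: i1 and i2 have
    // advanced through their rows, so rewinding them by input_decrement yields
    // the new i0 and i1; the lower rows are rebased from there.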
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,805 | 37.835381 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
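    // Main loop: produce 4 output pixels per iteration while more than 8 input
    // pixels remain, leaving at most 8 pixels for the masked tails below.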
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
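      // Split the 25 multiply-accumulates across four independent accumulators
      // (vo0p0..vo0p3) to shorten the NEON multiply-accumulate dependency chain.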
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vmlaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
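      // Reduce the four partial sums to a single result before clamping.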
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vmlaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p3 = vmlaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,937 | 37.873171 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
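      // Five independent accumulators (vo0p0..vo0p4) are used in this variant;
      // the extra register further relaxes the multiply-accumulate dependency chain.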
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x3456, vget_high_f32(vw0123), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x3456, vget_high_f32(vw4567), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x2345, vget_low_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x2345, vget_high_f32(vw4567), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
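      // There is no block to the right of the last one, so zeros are shifted in
      // for the taps at x+1 and x+2.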
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 16,069 | 37.910412 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
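      // Single-accumulator variant: all 25 taps chain through vo0p0.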
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w & (2 * sizeof(float))) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 15,536 | 37.745636 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-2x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
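  // This 2x4 variant produces two output rows per pass, which requires six
  // input rows (i0..i5) and a second output pointer one row below o0.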
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
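      // Only one output row is left: feed zeros for the missing input row and
      // alias o1 to o0; the o1 store is issued first and then overwritten by
      // the valid o0 results.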
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
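    // Process the final block of 1..4 pixels; lanes of the cached input
    // vectors beyond the valid width are masked to zero before use.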
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
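    // Rewind the row pointers to the start of a row and slide the input
    // window down by 2 rows for the next pair of output rows.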
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 23,563 | 43.460377 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-2x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
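    // Near the bottom edge, substitute the zero row for missing input rows
    // and alias o1 onto o0 so the unused second row of outputs is overwritten
    // by the first.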
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
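    // Main loop over full blocks: consumes 4 input columns and produces
    // 4 outputs in each of the 2 output rows per iteration.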
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
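      // The 89AB loads may read past the valid row width (XNN_OOB_READS);
      // mask those lanes to zero before they enter the accumulation.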
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
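    // Final block of 1..4 pixels: mask the tail lanes of the cached input
    // vectors, then compute and store a partial vector of outputs.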
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);
float32x4_t vo1p1 = vmulq_lane_f32(vi1x4567, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);
float32x4_t vo1p2 = vmulq_lane_f32(vi2x4567, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x3456, vget_high_f32(vw0123), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x3456, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x3456, vget_high_f32(vw4567), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi2x2345, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x5678, vget_low_f32(vw4567), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x6789, vget_low_f32(vw4567), 1);
vo1p1 = vmlaq_lane_f32(vo1p1, vi1x6789, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p2 = vmlaq_lane_f32(vo1p2, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p1 = vmlaq_lane_f32(vo1p1, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);
vo1p2 = vmlaq_lane_f32(vo1p2, vi5x6789, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo1p0 = vaddq_f32(vo1p0, vo1p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo1p0 = vaddq_f32(vo1p0, vo1p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
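    // Advance to the next pair of output rows: rewind the column position and
    // shift every input row pointer down by 2 rows.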
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
| 23,827 | 43.455224 | 90 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-neon-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_2x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const uint32x4_t vmask = vld1q_u32(params->neon_stride1.mask);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride1.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride1.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
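    // Implicit left padding of 2 columns: seed the leftmost vector of each
    // row with zeroes; the vextq_f32 shifts below rotate them into the
    // x2345/x3456 positions.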
float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
float32x4_t vi4x0123 = vmovq_n_f32(0.0f);
float32x4_t vi5x0123 = vmovq_n_f32(0.0f);
float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
size_t w = input_width;
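    // Main loop: 4 output pixels per output row per iteration, using a single
    // accumulator chain (no acc2/acc3 splitting in this variant).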
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));
vi5x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x89AB)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
vi0x0123 = vi0x4567;
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
vi1x0123 = vi1x4567;
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
vi2x0123 = vi2x4567;
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
vi3x0123 = vi3x4567;
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
vi4x0123 = vi4x4567;
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vi5x0123 = vi5x4567;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vi5x89AB, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
vi0x4567 = vi0x89AB;
const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
vi1x4567 = vi1x89AB;
const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
vi2x4567 = vi2x89AB;
const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
vi3x4567 = vi3x89AB;
const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
vi4x4567 = vi4x89AB;
const float32x4_t vi5x6789 = vextq_f32(vi5x4567, vi5x89AB, 2);
vi5x4567 = vi5x89AB;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
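    // Handle the last 1..4 pixels with masked inputs and a partial store.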
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));
vi5x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi5x4567)));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x4567, vget_high_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x4567, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x4567, vget_low_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x4567, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x4567, vget_low_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x4567, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x4567, vget_high_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x4567, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x4567, vget_high_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x4567, vget_high_f32(vwKLMN), 1);
const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);
const float32x4_t vi5x3456 = vextq_f32(vi5x0123, vi5x4567, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x3456, vget_high_f32(vw0123), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x3456, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_low_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x3456, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x3456, vget_low_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x3456, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x3456, vget_high_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x3456, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
const float32x4_t vi5x2345 = vextq_f32(vi5x0123, vi5x4567, 2);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x2345, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x2345, vget_high_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x2345, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vget_high_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x2345, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x2345, vget_low_f32(vwGHIJ), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x2345, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x2345, vget_low_f32(vwKLMN), 1);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);
const float32x4_t vi5x5678 = vextq_f32(vi5x4567, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x5678, vget_low_f32(vw4567), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x5678, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x5678, vget_low_f32(vw89AB), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x5678, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x5678, vget_high_f32(vwCDEF), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x5678, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x5678, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x5678, vwOP, 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x5678, vwOP, 0);
const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);
const float32x4_t vi5x6789 = vextq_f32(vi5x5678, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x6789, vget_low_f32(vw4567), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi1x6789, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x6789, vget_high_f32(vw89AB), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi2x6789, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi3x6789, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x6789, vget_low_f32(vwKLMN), 0);
vo1p0 = vmlaq_lane_f32(vo1p0, vi4x6789, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x6789, vwOP, 1);
vo1p0 = vmlaq_lane_f32(vo1p0, vi5x6789, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vo1 = vminq_f32(vo1, vmax);
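      // Store the last 1-4 pixels of each output row: w's bits select a
      // 4-, 2-, then 1-element store, shifting the high half down between.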
if XNN_LIKELY(w & (4 * sizeof(float))) {
vst1q_f32(o1, vo1); o1 += 4;
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
float32x2_t vo1_lo = vget_low_f32(vo1);
if (w & (2 * sizeof(float))) {
vst1_f32(o1, vo1_lo); o1 += 2;
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
vo1_lo = vget_high_f32(vo1);
}
if (w & (1 * sizeof(float))) {
vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
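    // Advance to the next pair of output rows: i0/i1 restart two rows down,
    // and input_decrement rewinds the column overshoot left by the
    // 4-pixel-aligned vector loads.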
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-1x1-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
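  // Weight layout: weights[0] is the shared bias, weights[1..25] are the
  // 5x5 kernel taps in row-major order (vkRC = row R, column C).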
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
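    // Columns x0 and x1 start at zero to realize the left padding of 2;
    // x2 holds the first real input column of the 5-wide sliding window.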
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
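        // acc2 variant: the 25 multiply-adds alternate between two
        // independent accumulators (vo0p0/vo0p1) to shorten the FP add
        // dependency chain; they are summed once before clamping.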
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p1 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo0p1 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p1 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
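    // i2..i4 already walked one full row and now point at the next row;
    // only i0 and i1 must be rewound, from i1 and i2 respectively.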
i0 = (const float*) ((uintptr_t) i1 - input_width);
i1 = (const float*) ((uintptr_t) i2 - input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-1x1-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vo0p2 += vi0x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo0p1 += vi2x4 * vk24;
vo0p2 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
i1 = (const float*) ((uintptr_t) i2 - input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-1x1-acc4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p3 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p2 += vi3x3 * vk33;
vo0p3 += vi4x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p2 += vi2x4 * vk24;
vo0p3 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
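        // Pairwise reduction of the four accumulators keeps the final
        // summation two adds deep instead of a serial chain.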
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p3 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p2 += vi3x3 * vk33;
vo0p3 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
i1 = (const float*) ((uintptr_t) i2 - input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-1x1-acc5.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
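        // acc5 variant: one accumulator per kernel row, so the five rows'
        // multiply-add chains proceed independently until the final reduce.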
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p3 += vi3x3 * vk33;
vo0p4 += vi4x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p2 += vi2x4 * vk24;
vo0p3 += vi3x4 * vk34;
vo0p4 += vi4x4 * vk44;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p3 += vi3x3 * vk33;
vo0p4 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
i1 = (const float*) ((uintptr_t) i2 - input_width);
} while (--output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-1x1.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p0 += vi4x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vo0p0 += vi0x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo0p0 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
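      // Epilogue for the last two output columns: window taps that would
      // land on the right padding are simply omitted (column x4 here,
      // x3 and x4 in the final column below).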
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vo0p0 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p0 += vi4x3 * vk43;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i1 - input_width);
i1 = (const float*) ((uintptr_t) i2 - input_width);
} while (--output_height != 0);
}
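/*
 * Usage sketch (illustrative, not part of the generated sources): driving
 * the plain scalar 5x5 ukernel above on one 4x5 channel. The weight layout
 * (bias in weights[0], 25 row-major taps after it), the byte-valued
 * input_width, and the zero row backing the two implicitly padded top rows
 * all follow the code above; the concrete sizes and the very wide clamping
 * bounds are assumptions made for the example.
 */
#include <string.h>

#include <xnnpack/dwconv.h>

void example_5x5p2_scalar_1x1(const float input[4 * 5],
                              const float weights[26],
                              float output[4 * 5]) {
  // One all-zero row; the kernel reads it in place of rows above the input.
  float zero[5];
  memset(zero, 0, sizeof(zero));
  // Effectively disable output clamping by using the full float range.
  union xnn_f32_chw_params params;
  params.scalar.min = -3.402823466e+38f;
  params.scalar.max = +3.402823466e+38f;
  xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1(
      /*input_height=*/4,
      /*input_width=*/5 * sizeof(float),  // row width is passed in bytes
      input, weights, zero, output,
      /*padding_top=*/2, &params);
}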
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-2x1-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
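  // 2x1 variant: each pass computes two output rows at once; rows i0-i4
  // feed o0 and rows i1-i5 feed o1, so loaded input values and the shared
  // kernel taps are reused across both outputs.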
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi5x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
float vi5x2 = *i5++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
float vi5x3 = *i5++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
const float vi5x4 = *i5++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p1 += vi0x3 * vk03;
vo1p1 += vi1x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo1p1 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vi5x3 = vi5x4;
vo0p0 += vi0x4 * vk04;
vo1p0 += vi1x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo1p1 += vi2x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo1p0 += vi3x4 * vk24;
vo0p1 += vi3x4 * vk34;
vo1p1 += vi4x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo1p0 += vi5x4 * vk44;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p1 += vi0x3 * vk03;
vo1p1 += vi1x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo1p1 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i3 - input_width);
i2 = i3;
i3 = i4;
i4 = i5;
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
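    // doz() is a saturating "difference or zero" subtract, so a trailing
    // odd row cannot underflow the remaining-height counter.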
output_height = doz(output_height, 2);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-2x1-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi5x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
float vi5x2 = *i5++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
float vi5x3 = *i5++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
const float vi5x4 = *i5++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo1p2 = vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo1p1 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p2 += vi0x1 * vk01;
vo1p2 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo1p2 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p1 += vi0x2 * vk02;
vo1p1 += vi1x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo1p2 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo1p2 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo1p1 += vi2x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo1p2 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vi5x3 = vi5x4;
vo0p2 += vi0x4 * vk04;
vo1p2 += vi1x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo1p0 += vi2x4 * vk14;
vo0p1 += vi2x4 * vk24;
vo1p1 += vi3x4 * vk24;
vo0p2 += vi3x4 * vk34;
vo1p2 += vi4x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo1p0 += vi5x4 * vk44;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo0p0 += vo0p2;
vo1p0 += vo1p2;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo1p2 = vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo1p1 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p2 += vi0x1 * vk01;
vo1p2 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo1p2 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p1 += vi0x2 * vk02;
vo1p1 += vi1x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo1p2 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo1p2 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo1p1 += vi2x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo1p2 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo0p0 += vo0p2;
vo1p0 += vo1p2;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo1p2 = vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo1p1 += vi5x0 * vk40;
vo0p2 += vi0x1 * vk01;
vo1p2 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo1p2 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vo0p1 += vi0x2 * vk02;
vo1p1 += vi1x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo1p2 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo1p2 += vi5x2 * vk42;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo0p0 += vo0p2;
vo1p0 += vo1p2;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i3 - input_width);
i2 = i3;
i3 = i4;
i4 = i5;
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-2x1.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_2x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi5x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
float vi5x2 = *i5++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
float vi5x3 = *i5++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
const float vi5x4 = *i5++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo1p0 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p0 += vi4x3 * vk43;
vo1p0 += vi5x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vi5x3 = vi5x4;
vo0p0 += vi0x4 * vk04;
vo1p0 += vi1x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo1p0 += vi2x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo1p0 += vi3x4 * vk24;
vo0p0 += vi3x4 * vk34;
vo1p0 += vi4x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo1p0 += vi5x4 * vk44;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo1p0 += vi3x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo0p0 += vi4x3 * vk43;
vo1p0 += vi5x3 * vk43;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
*o1++ = vo1;
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i3 - input_width);
i2 = i3;
i3 = i4;
i4 = i5;
i5 = (const float*) ((uintptr_t) i4 + input_width);
o0 = o1;
o1 = (float*) ((uintptr_t) o0 + input_width);
output_height = doz(output_height, 2);
} while (output_height != 0);
}
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-3x1-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_3x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i6 = zero;
}
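    // With fewer than three rows left, o2/o1 alias the previous output row
    // and the out-of-range input rows read from the zero buffer, making
    // the surplus row computations harmless duplicate stores.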
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi6x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi5x1 = 0.0f;
float vi6x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
float vi5x2 = *i5++;
float vi6x2 = *i6++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
float vi5x3 = *i5++;
float vi6x3 = *i6++;
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
const float vi5x4 = *i5++;
const float vi6x4 = *i6++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo2p1 = vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo2p1 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo2p1 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo2p1 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vo2p1 += vi6x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo2p1 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo2p1 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vi6x2 = vi6x3;
vo0p1 += vi0x3 * vk03;
vo1p1 += vi1x3 * vk03;
vo2p1 += vi2x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo2p0 += vi3x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo1p1 += vi3x3 * vk23;
vo2p1 += vi4x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo2p0 += vi5x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vo2p1 += vi6x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vi5x3 = vi5x4;
vi6x3 = vi6x4;
vo0p0 += vi0x4 * vk04;
vo1p0 += vi1x4 * vk04;
vo2p0 += vi2x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo1p1 += vi2x4 * vk14;
vo2p1 += vi3x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo1p0 += vi3x4 * vk24;
vo2p0 += vi4x4 * vk24;
vo0p1 += vi3x4 * vk34;
vo1p1 += vi4x4 * vk34;
vo2p1 += vi5x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo1p0 += vi5x4 * vk44;
vo2p0 += vi6x4 * vk44;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo2p0 += vo2p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo2p1 = vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo2p1 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo2p1 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo2p1 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vo2p1 += vi6x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo2p1 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo2p1 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vi6x2 = vi6x3;
vo0p1 += vi0x3 * vk03;
vo1p1 += vi1x3 * vk03;
vo2p1 += vi2x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo2p0 += vi3x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo1p1 += vi3x3 * vk23;
vo2p1 += vi4x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo2p0 += vi5x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo1p1 += vi5x3 * vk43;
vo2p1 += vi6x3 * vk43;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo2p0 += vo2p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
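// Last pixel: columns 3 and 4 of the window fall in the right padding, so only the vk*0..vk*2 taps contribute.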
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo1p1 = vi2x0 * vk10;
float vo2p1 = vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo1p1 += vi4x0 * vk30;
vo2p1 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo1p1 += vi1x1 * vk01;
vo2p1 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo1p1 += vi3x1 * vk21;
vo2p1 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo1p1 += vi5x1 * vk41;
vo2p1 += vi6x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo1p1 += vi2x2 * vk12;
vo2p1 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo1p1 += vi4x2 * vk32;
vo2p1 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
vo0p0 += vo0p1;
vo1p0 += vo1p1;
vo2p0 += vo2p1;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
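// Slide the seven input row pointers down three rows for the next block of three output rows; doz() is a saturating (difference-or-zero) subtraction.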
i0 = (const float*) ((uintptr_t) i3 - input_width);
i1 = (const float*) ((uintptr_t) i4 - input_width);
i2 = i4;
i3 = i5;
i4 = i6;
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 12,224 | 26.10643 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-scalar-3x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_3x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
float* o0 = output;
float* o1 = (float*) ((uintptr_t) o0 + input_width);
float* o2 = (float*) ((uintptr_t) o1 + input_width);
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
o1 = o0;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
o2 = o1;
}
if XNN_UNPREDICTABLE(output_height < 4) {
i5 = zero;
}
if XNN_UNPREDICTABLE(output_height < 5) {
i6 = zero;
}
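// The five-column sliding window (vi*x0..vi*x4) starts with two zero columns for the implicit left padding.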
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi5x0 = 0.0f;
float vi6x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi5x1 = 0.0f;
float vi6x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
float vi5x2 = *i5++;
float vi6x2 = *i6++;
size_t w = input_width;
if (w > 1 * sizeof(float)) {
float vi0x3 = *i0++;
float vi1x3 = *i1++;
float vi2x3 = *i2++;
float vi3x3 = *i3++;
float vi4x3 = *i4++;
float vi5x3 = *i5++;
float vi6x3 = *i6++;
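// Main loop: one new input column in, one output pixel out on each of the three rows.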
for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
const float vi0x4 = *i0++;
const float vi1x4 = *i1++;
const float vi2x4 = *i2++;
const float vi3x4 = *i3++;
const float vi4x4 = *i4++;
const float vi5x4 = *i5++;
const float vi6x4 = *i6++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo2p0 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vo2p0 += vi6x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo2p0 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vi6x2 = vi6x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo2p0 += vi2x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo2p0 += vi3x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo1p0 += vi3x3 * vk23;
vo2p0 += vi4x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo2p0 += vi5x3 * vk33;
vo0p0 += vi4x3 * vk43;
vo1p0 += vi5x3 * vk43;
vo2p0 += vi6x3 * vk43;
vi0x3 = vi0x4;
vi1x3 = vi1x4;
vi2x3 = vi2x4;
vi3x3 = vi3x4;
vi4x3 = vi4x4;
vi5x3 = vi5x4;
vi6x3 = vi6x4;
vo0p0 += vi0x4 * vk04;
vo1p0 += vi1x4 * vk04;
vo2p0 += vi2x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo1p0 += vi2x4 * vk14;
vo2p0 += vi3x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo1p0 += vi3x4 * vk24;
vo2p0 += vi4x4 * vk24;
vo0p0 += vi3x4 * vk34;
vo1p0 += vi4x4 * vk34;
vo2p0 += vi5x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo1p0 += vi5x4 * vk44;
vo2p0 += vi6x4 * vk44;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
assert(w == 2 * sizeof(float));
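// Second-to-last pixel: the vk*4 column lies in the right padding and is skipped.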
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo2p0 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vi0x0 = vi0x1;
vi1x0 = vi1x1;
vi2x0 = vi2x1;
vi3x0 = vi3x1;
vi4x0 = vi4x1;
vi5x0 = vi5x1;
vi6x0 = vi6x1;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vo2p0 += vi6x1 * vk41;
vi0x1 = vi0x2;
vi1x1 = vi1x2;
vi2x1 = vi2x2;
vi3x1 = vi3x2;
vi4x1 = vi4x2;
vi5x1 = vi5x2;
vi6x1 = vi6x2;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo2p0 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
vi0x2 = vi0x3;
vi1x2 = vi1x3;
vi2x2 = vi2x3;
vi3x2 = vi3x3;
vi4x2 = vi4x3;
vi5x2 = vi5x3;
vi6x2 = vi6x3;
vo0p0 += vi0x3 * vk03;
vo1p0 += vi1x3 * vk03;
vo2p0 += vi2x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo1p0 += vi2x3 * vk13;
vo2p0 += vi3x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo1p0 += vi3x3 * vk23;
vo2p0 += vi4x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo1p0 += vi4x3 * vk33;
vo2p0 += vi5x3 * vk33;
vo0p0 += vi4x3 * vk43;
vo1p0 += vi5x3 * vk43;
vo2p0 += vi6x3 * vk43;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
w -= 1 * sizeof(float);
}
assert(w == 1 * sizeof(float));
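// Last pixel: the vk*3 and vk*4 columns lie in the right padding and are skipped.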
{
float vo0p0 = vbias + vi0x0 * vk00;
float vo1p0 = vbias + vi1x0 * vk00;
float vo2p0 = vbias + vi2x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo1p0 += vi2x0 * vk10;
vo2p0 += vi3x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo1p0 += vi3x0 * vk20;
vo2p0 += vi4x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo1p0 += vi4x0 * vk30;
vo2p0 += vi5x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo1p0 += vi5x0 * vk40;
vo2p0 += vi6x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo1p0 += vi1x1 * vk01;
vo2p0 += vi2x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo1p0 += vi2x1 * vk11;
vo2p0 += vi3x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo1p0 += vi3x1 * vk21;
vo2p0 += vi4x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo1p0 += vi4x1 * vk31;
vo2p0 += vi5x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo1p0 += vi5x1 * vk41;
vo2p0 += vi6x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo1p0 += vi1x2 * vk02;
vo2p0 += vi2x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo1p0 += vi2x2 * vk12;
vo2p0 += vi3x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo1p0 += vi3x2 * vk22;
vo2p0 += vi4x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo1p0 += vi4x2 * vk32;
vo2p0 += vi5x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo1p0 += vi5x2 * vk42;
vo2p0 += vi6x2 * vk42;
float vo0 = math_max_f32(vo0p0, vmin);
float vo1 = math_max_f32(vo1p0, vmin);
float vo2 = math_max_f32(vo2p0, vmin);
vo0 = math_min_f32(vo0, vmax);
vo1 = math_min_f32(vo1, vmax);
vo2 = math_min_f32(vo2, vmax);
*o2++ = vo2;
*o1++ = vo1;
*o0++ = vo0;
}
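// Advance the input and output row pointers past the three rows just written; doz() saturates the subtraction at zero.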
i0 = (const float*) ((uintptr_t) i3 - input_width);
i1 = (const float*) ((uintptr_t) i4 - input_width);
i2 = i4;
i3 = i5;
i4 = i6;
i5 = (const float*) ((uintptr_t) i4 + input_width);
i6 = (const float*) ((uintptr_t) i5 + input_width);
o0 = o2;
o1 = (float*) ((uintptr_t) o0 + input_width);
o2 = (float*) ((uintptr_t) o1 + input_width);
output_height = doz(output_height, 3);
} while (output_height != 0);
}
| 11,964 | 26.070136 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-sse-1x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
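// vi*x3012 carries the previous block rotated one lane right, so lane 0 holds the pixel just left of the current block (zero for the implicit left padding).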
__m128 vi0x3012 = _mm_setzero_ps();
__m128 vi1x3012 = _mm_setzero_ps();
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi4x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
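// Main loop: four output pixels per iteration while more than eight input pixels remain, so the four-pixel lookahead block can be loaded unmasked.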
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
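// Rotate each block one lane right (4567 -> 7456); splicing the carried lane into lane 0 then yields the window shifted left by one (3456).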
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// When 5..8 pixels remain, process a full block of 4 here; the lookahead loads are masked against the end of the row.
if XNN_LIKELY(w > 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 += 4;
const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 += 4;
const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 += 4;
const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 += 4;
const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
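// Final partial block of 1..4 pixels: zero the lanes past the row end before the multiplies.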
{
vi0x4567 = _mm_and_ps(vi0x4567, vmask);
vi1x4567 = _mm_and_ps(vi1x4567, vmask);
vi2x4567 = _mm_and_ps(vi2x4567, vmask);
vi3x4567 = _mm_and_ps(vi3x4567, vmask);
vi4x4567 = _mm_and_ps(vi4x4567, vmask);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
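// Store 4 pixels when exactly 4 remain; otherwise store the 1..3 remaining pixels as a 2-lane and/or 1-lane tail.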
if XNN_LIKELY(w & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,204 | 43.839901 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-sse-1x4-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
__m128 vi0x3012 = _mm_setzero_ps();
__m128 vi1x3012 = _mm_setzero_ps();
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi4x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
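// Three independent accumulators (vo0p0..vo0p2) shorten the floating-point dependency chain; they are summed before clamping.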
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x2345, vk00));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x6789, vk24));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// When 5..8 pixels remain, process a full block of 4 here; the lookahead loads are masked against the end of the row.
if XNN_LIKELY(w > 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 += 4;
const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 += 4;
const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 += 4;
const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 += 4;
const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x2345, vk00));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x6789, vk24));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vi0x4567, vmask);
vi1x4567 = _mm_and_ps(vi1x4567, vmask);
vi2x4567 = _mm_and_ps(vi2x4567, vmask);
vi3x4567 = _mm_and_ps(vi3x4567, vmask);
vi4x4567 = _mm_and_ps(vi4x4567, vmask);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x2345, vk00));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x6789, vk24));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,288 | 43.716381 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-sse-1x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
__m128 vi0x3012 = _mm_setzero_ps();
__m128 vi1x3012 = _mm_setzero_ps();
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi4x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
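// Four independent accumulators (vo0p0..vo0p3) hide multiply-add latency; they are reduced pairwise before clamping.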
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x3456, vk11));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x2345, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x5678, vk33));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// When 5..8 pixels remain, process a full block of 4 here; the lookahead loads are masked against the end of the row.
if XNN_LIKELY(w > 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 += 4;
const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 += 4;
const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 += 4;
const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 += 4;
const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x3456, vk11));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x2345, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x5678, vk33));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
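    // Final block of 1..4 pixels: zero the lanes past the end of the row via
    // vmask so they add nothing to the convolution sums.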
{
vi0x4567 = _mm_and_ps(vi0x4567, vmask);
vi1x4567 = _mm_and_ps(vi1x4567, vmask);
vi2x4567 = _mm_and_ps(vi2x4567, vmask);
vi3x4567 = _mm_and_ps(vi3x4567, vmask);
vi4x4567 = _mm_and_ps(vi4x4567, vmask);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x3456, vk11));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
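      // No input remains to the right of this block; an all-zero vector
      // stands in for the missing columns below.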
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x2345, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x5678, vk33));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
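      // Remainder store: write 4, 2, then 1 pixels according to w.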
if XNN_LIKELY(w & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
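    // Slide the 5-row window down one output row: i1/i2 are rebased to the
    // start of their rows and become the new i0/i1; i2..i4 are recomputed one
    // row apart below them.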
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,372 | 43.59466 | 90 | c
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-sse-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
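  // weights[1..25] are the 25 taps of the 5x5 kernel; vkRC holds the tap for
  // kernel row R, column C, broadcast across all four lanes.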
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
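    // Rows i3/i4 would read below the image for the last two output rows;
    // point them at the zero row instead (implicit bottom padding).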
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
__m128 vi0x3012 = _mm_setzero_ps();
__m128 vi1x3012 = _mm_setzero_ps();
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi4x3012 = _mm_setzero_ps();
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
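    // Main loop: 4 output pixels per iteration while more than 8 input
    // columns remain, so the unmasked look-ahead load of columns 8..B is safe.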
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
__m128 vo0p4 = _mm_mul_ps(vi4x4567, vk42);
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk21));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x3456, vk31));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x2345, vk20));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x2345, vk30));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x5678, vk33));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x6789, vk44));
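      // Pairwise reduction of the 5 accumulators: ((p0 + p1) + (p2 + p3)) + p4
      // keeps the floating-point dependency chain short.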
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
vo0p0 = _mm_add_ps(vo0p0, vo0p4);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 5..8 pixels.
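    // After this block at most one partial vector (1..4 pixels) is left for
    // the final masked block below.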
if XNN_LIKELY(w > 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
__m128 vo0p4 = _mm_mul_ps(vi4x4567, vk42);
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 += 4;
const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 += 4;
const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 += 4;
const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 += 4;
const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk21));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x3456, vk31));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x2345, vk20));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x2345, vk30));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x5678, vk33));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
vo0p0 = _mm_add_ps(vo0p0, vo0p4);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vi0x4567, vmask);
vi1x4567 = _mm_and_ps(vi1x4567, vmask);
vi2x4567 = _mm_and_ps(vi2x4567, vmask);
vi3x4567 = _mm_and_ps(vi3x4567, vmask);
vi4x4567 = _mm_and_ps(vi4x4567, vmask);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x4567, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x4567, vk32);
__m128 vo0p4 = _mm_mul_ps(vi4x4567, vk42);
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x3456, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x3456, vk21));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x3456, vk31));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x2345, vk20));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x2345, vk30));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x5678, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x5678, vk23));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x5678, vk33));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x6789, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x6789, vk34));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x6789, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
vo0p0 = _mm_add_ps(vo0p0, vo0p4);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,456 | 43.474699 | 90 | c
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-sse-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const __m128 vmask = _mm_load_ps((const float*) params->sse_stride1.mask);
const __m128 vmax = _mm_load_ps(params->sse_stride1.max);
const __m128 vmin = _mm_load_ps(params->sse_stride1.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
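  // This variant keeps a single accumulator chain (vo0p0); the accN variants
  // above split the sum across N accumulators to hide multiply-add latency.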
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
__m128 vi0x3012 = _mm_setzero_ps();
__m128 vi1x3012 = _mm_setzero_ps();
__m128 vi2x3012 = _mm_setzero_ps();
__m128 vi3x3012 = _mm_setzero_ps();
__m128 vi4x3012 = _mm_setzero_ps();
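    // Register naming: viRxABCD holds input columns A..D of window row R. The
    // x3012 registers start as zeros so taps that read left of the image see
    // the implicit padding.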
__m128 vi0x4567 = _mm_loadu_ps(i0);
i0 += 4;
__m128 vi1x4567 = _mm_loadu_ps(i1);
i1 += 4;
__m128 vi2x4567 = _mm_loadu_ps(i2);
i2 += 4;
__m128 vi3x4567 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vi4x4567 = _mm_loadu_ps(i4);
i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 += 4;
const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 += 4;
const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 += 4;
const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 += 4;
const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
i4 += 4;
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi0x3012 = vi0x7456;
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi1x3012 = vi1x7456;
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi2x3012 = vi2x7456;
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi3x3012 = vi3x7456;
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
vi4x3012 = vi4x7456;
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
vi0x4567 = vi0x89AB;
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
vi1x4567 = vi1x89AB;
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
vi2x4567 = vi2x89AB;
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
vi3x4567 = vi3x89AB;
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
vi4x4567 = vi4x89AB;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
vi0x4567 = _mm_and_ps(vi0x4567, vmask);
vi1x4567 = _mm_and_ps(vi1x4567, vmask);
vi2x4567 = _mm_and_ps(vi2x4567, vmask);
vi3x4567 = _mm_and_ps(vi3x4567, vmask);
vi4x4567 = _mm_and_ps(vi4x4567, vmask);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w & (1 * sizeof(float))) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,115 | 43.952854 | 90 | c
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
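  // 'loadsplat' flavor: each of the 25 taps (and the bias) is broadcast into
  // its own vector up front via lane shuffles.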
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
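      // Past the right edge of the row: shuffle zeros in for the missing columns.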
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
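      // Tail store: 4, 2, then 1 pixels via progressively narrower lane stores.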
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,685 | 40.226107 | 86 | c
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
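  // "loadsplat" variant: the bias and all 25 kernel taps are broadcast once
  // into dedicated vector registers up front, so the inner loops only issue
  // multiply-adds against pre-splatted constants.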
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
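    // Main loop: computes 4 output columns per iteration while more than
    // 8 input columns remain. vi*x0123 holds the columns left of the current
    // window, vi*x4567 the current 4 columns, and vi*x89AB the next 4.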
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
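      // Three accumulators (vo0p0..vo0p2) split the 25 multiply-adds into
      // independent dependency chains; they are summed just before clamping.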
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last block of 5..8 pixels, entered when more than 4 columns remain.
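    // The trailing lanes of the vi*x89AB loads may lie past the end of the
    // row, so vmask zeroes the out-of-bounds columns before they enter the
    // multiply-adds.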
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
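    // Final block of 1..4 columns: mask the current window itself and use
    // zero vectors for the right-hand neighbors beyond the row.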
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,769 | 40.134259 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
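      // Four accumulators (vo0p0..vo0p3) spread the 25 multiply-adds across
      // independent dependency chains, reduced pairwise at the end of the
      // iteration.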
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
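      // wasm_v32x4_shuffle indexes the 8 lanes of its two operands, so the
      // (3, 4, 5, 6) and (2, 3, 4, 5) patterns below shift the window one and
      // two columns to the left, pulling in lanes from the previous vector.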
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last block of 5..8 pixels, entered when more than 4 columns remain.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,853 | 40.043678 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
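      // Five accumulators (vo0p0..vo0p4) rotate through the 25 multiply-adds,
      // so consecutive operations land on different dependency chains.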
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
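      // Reduce the five partial sums into vo0p0 before clamping.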
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last block of 5..8 pixels, entered when more than 4 columns remain.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,937 | 39.954338 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
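      // Single-accumulator variant: all 25 multiply-adds chain serially
      // through vo0p0.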
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last block of 5..8 pixels, entered when more than 4 columns remain.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
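      // Final block of 1..4 pixels: mask the current window, then shift zeros in from the right for the x5678/x6789 windows.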
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
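      // Store 4, 2, and/or 1 output pixels depending on the remaining width.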
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
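    // Slide the five-row input window down one output row: i0/i1 rewind to the start of the rows just consumed, and i2..i4 are rederived from them.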
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,596 | 40.307512 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
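  // weights[0] seeds the accumulator (the per-channel bias); weights[1..25] are the 25 kernel taps, splatted lane-by-lane below.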
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
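    // Main loop: while more than 8 input pixels remain, each iteration consumes 4 inputs and produces 4 outputs.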
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
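      // acc2 variant: the multiply-adds alternate between two accumulators (vo0p0/vo0p1), which are summed just before the store.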
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
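      // Past the row end there is no x89AB block to shuffle with, so shift zeros in from the right instead.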
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 19,080 | 46.347395 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
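      // acc3 variant: three accumulators (vo0p0..vo0p2) split the multiply-add chain and are summed before the store.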
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
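      // No x89AB block remains at the row end; zeros are shifted in from the right for the x5678/x6789 windows.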
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 19,164 | 46.204433 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
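      // acc4 variant: four accumulators (vo0p0..vo0p3) split the multiply-add chain and are pairwise summed before the store.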
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
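      // No further input exists past the right edge, so zeros stand in for the +1 and +2 columns.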
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
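      // Write out the last 1..4 pixels: a full vector when 4 remain, otherwise 2 and/or 1 lanes, shuffling stored lanes away between the partial stores.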
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
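    // Slide the 5-row stencil down one output row: each pointer takes over from the row below, with input_decrement rewinding the fully-walked rows back to their starts.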
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 19,248 | 46.06357 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-splat-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
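  // The weights hold 26 floats: the bias followed by the 25 taps of the 5x5 kernel in row-major order; vwOP carries the last two taps in both 64-bit halves.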
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
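  // Row pointers overshoot by the tile-padded width; subtracting input_decrement returns one to the start of the row it just walked.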
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
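    // Redirect rows that would fall below the image to the zero buffer, so the 5-row stencil reads zeros for the bottom padding.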
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
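    // Width is split three ways: the main loop runs while more than 8 pixels remain, the next block consumes 4 when 5..8 remain, and the final block handles the last 1..4.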
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
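      // Fold the five partial accumulators pairwise and clamp; spreading the 25 products over vo0p0..vo0p4 shortens the floating-point add dependency chain.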
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 19,332 | 45.924757 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-arm-splat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
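    // The zero-filled vi*x0123 vectors supply the two columns of implicit left padding (just as i0 and i1 start at the zero row for the top padding).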
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
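      // This un-suffixed variant chains every multiply-add through the single accumulator vo0p0; the accN siblings split the sum to shorten the dependency chain.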
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 18,991 | 46.48 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
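  // "loadsplat" variant: splat the bias and all 25 taps into dedicated vectors once, up front, instead of re-shuffling the packed weight vectors at every use.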
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
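      // The x86-tuned variant clamps with the pseudo-min/max ops: pmax(vmin, x) picks x when x > vmin, matching SSE maxps semantics so it can lower to a single instruction.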
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,691 | 40.240093 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
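// weights[0] is the bias; weights[1..25] hold the 5x5 kernel taps in row-major
// order. Each tap is splatted across all four lanes (vkRC = row R, column C).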
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
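// Main loop: 4 output pixels per iteration. Each iteration loads the next
// 4 input pixels (x89AB) for right context, so it runs only while more than
// 8 input pixels remain in the row.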
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
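// Shuffle adjacent blocks into shifted windows (x3456, x2345, x5678, x6789)
// so that each of the five kernel columns multiplies its own aligned window.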
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
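// Reduce the three partial accumulators into vo0p0.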
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
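// Clamp to [min, max] via the pseudo-min/max ops, which lower to single x86
// MINPS/MAXPS instructions (hence the "x86" variant of this kernel).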
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
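// The trailing block may be partial: zero any lanes of x89AB that lie past
// the end of the row before they enter the accumulation.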
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
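// Final block of 1..4 pixels: mask the previously loaded x4567 block and use
// zeros as right context instead of reading past the end of the row.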
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,775 | 40.148148 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
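// Reduce the four partial accumulators pairwise: p0 += p1, p2 += p3, p0 += p2.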
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, vk31));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, vk13));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,859 | 40.057471 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
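// Reduce the five partial accumulators: p0 += p1, p2 += p3, p0 += p2, p0 += p4.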
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| 17,943 | 39.968037 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
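// Single-accumulator variant: all 25 multiply-adds chain through vo0p0,
// trading instruction-level parallelism for fewer live registers.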
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
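      // There is no input left to load on the right; shift explicit zeros into
      // the vacated lanes instead.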
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
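    // Step all five input rows down by one: i0/i1 are rewound to the rows just
    // consumed, and i2..i4 are rederived one row further on.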
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| file_length: 17602 | avg_line_length: 40.321596 | max_line_length: 86 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
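    // Sliding window per row: vi*x0123 holds the previous four columns
    // (initially the implicit left padding of zeros) and vi*x4567 the current
    // four; lane shuffles of the pair produce the five horizontal taps.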
size_t w = input_width;
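    // Main loop: emit four output columns per iteration while more than eight
    // input columns remain, so the next block can be loaded without masking.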
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
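      // acc2: fold the second partial sum into the first before clamping.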
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| file_length: 19086 | avg_line_length: 46.362283 | max_line_length: 110 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
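  // Weight layout: weights[0] is the bias and weights[1..25] are the 5x5 taps
  // in row-major order; individual taps are broadcast via lane shuffles below.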
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
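      // acc3: three independent partial sums shorten the dependency chain;
      // reduce them to one here.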
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
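      // Store the remaining 1..4 output columns: either a full store, or a
      // 2-lane store optionally followed by a 1-lane store.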
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}
| file_length: 19170 | avg_line_length: 46.219212 | max_line_length: 110 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
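      // acc4: reduce the four partial sums pairwise.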
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Always process the last block of 5..8 pixels.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}

// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-splat-1x4-acc5.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
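  // Weight layout: weights[0] is the bias, weights[1..25] are the 5x5 kernel taps, held in seven registers below.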
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
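    // Zero-initialized x0123 registers provide the implicit left padding of the row.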
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
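    // Main loop: 4 output pixels per iteration while more than 8 input pixels remain.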
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
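      // Reduce the five partial accumulators into vo0p0.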
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last full block of 5..8 pixels, if one remains.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
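      // The trailing quad may overread the row; zero its invalid lanes.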
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
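    // Final 1..4 pixels: mask off lanes past the row end before accumulating.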
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
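      // Store 4 lanes, or a 2- and/or 1-lane tail, depending on the remaining width.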
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}

// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5p2-minmax-wasmsimd-x86-splat-1x4.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top == 2);
const v128_t vmask = wasm_v128_load(params->wasmsimd_stride1.mask);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride1.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride1.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
const float* i0 = zero;
const float* i1 = zero;
const float* i2 = input;
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t output_height = input_height;
do {
if XNN_UNPREDICTABLE(output_height < 2) {
i3 = zero;
}
if XNN_UNPREDICTABLE(output_height < 3) {
i4 = zero;
}
v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
size_t w = input_width;
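    // Single-accumulator variant: all 25 multiply-adds chain through vo0p0.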
for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Process the last full block of 5..8 pixels, if one remains.
if XNN_LIKELY(w > 4 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
vi0x0123 = vi0x4567;
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
vi1x0123 = vi1x4567;
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
vi2x0123 = vi2x4567;
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
vi3x0123 = vi3x4567;
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vi4x0123 = vi4x4567;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
vi0x4567 = vi0x89AB;
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
vi1x4567 = vi1x89AB;
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
vi2x4567 = vi2x89AB;
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
vi3x4567 = vi3x89AB;
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
vi4x4567 = vi4x89AB;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
w -= 4 * sizeof(float);
}
assert(w >= 1 * sizeof(float));
assert(w <= 4 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x4567 = wasm_v128_and(vmask, vi0x4567);
vi1x4567 = wasm_v128_and(vmask, vi1x4567);
vi2x4567 = wasm_v128_and(vmask, vi2x4567);
vi3x4567 = wasm_v128_and(vmask, vi3x4567);
vi4x4567 = wasm_v128_and(vmask, vi4x4567);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
if XNN_LIKELY(w & (4 * sizeof(float))) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i1 - input_decrement);
i1 = (const float*) ((uintptr_t) i2 - input_decrement);
i2 = (const float*) ((uintptr_t) i1 + input_width);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
} while (--output_height != 0);
}

// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-aarch64-neonfma-1x4-acc2.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
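  // Hypothetical call sketch (variable names assumed, not from this file; note input_width is in bytes):
  //   xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4_acc2(
  //       height, width_px * sizeof(float), row, weights /* 26 floats: bias + 5x5 taps */,
  //       zero_row, out, /*padding_top=*/2, &chw_params);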
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
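  // e.g. input_height = 7, padding_top = 2: padded_input_height = 11, output_height = (11 - 5 + 2) / 2 = 4.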
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
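    // vld2q de-interleaves each row into even (x8ACE...) and odd (x9BDF...) columns for the stride-2 window.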
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
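      // Shift in zeroes for the column one past the right edge (kernel column 4).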
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
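      // Outputs remaining: ceil(input pixels / 2), since the stride-2 kernel consumes two columns per output.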
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}

// File: XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-aarch64-neonfma-1x4-acc3.c

// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
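      // Combine the three partial accumulators.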
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x9BDF, vwOP, 0);
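      // Columns 6,8,A,C / 7,9,B,D: splice the last even/odd column of the
      // previous block (lane 3) with the first three columns of this one.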
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
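      // No next block on the right: shift in explicit zeros to form columns
      // A,C,E (and a zero standing in for G).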
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
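      // Outputs remaining in this block: ceil(p / 2), where p = w / sizeof(float)
      // is the number of input pixels (1-8) left in the row.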
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,892 | 40.191693 | 124 | c |
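For reference, a minimal scalar sketch of the operation the 5x5s2p2 kernels in this family compute: a depthwise 5x5 convolution with stride 2, followed by a min/max clamp. This is an illustration, not XNNPACK code; the function name is invented, sizes are in elements (the kernels take input_width in bytes), and symmetric padding of 2 is assumed (the kernels accept a padding_top of 1 or 2 plus two rows of bottom padding). The weight layout matches the kernels: weights[0] is the bias, weights[1..25] are the taps in row-major order.

// Reference sketch only, under the assumptions stated above.
#include <stddef.h>

static void dwconv2d_chw_5x5s2p2_ref(
    size_t input_height, size_t input_width,    // in elements
    const float* input, const float* weights,   // weights[0]=bias, weights[1..25]=5x5 taps
    float* output, float output_min, float output_max)
{
  const size_t output_height = (input_height + 2 * 2 - 5) / 2 + 1;
  const size_t output_width  = (input_width  + 2 * 2 - 5) / 2 + 1;
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = weights[0];  // bias
      for (size_t ky = 0; ky < 5; ky++) {
        for (size_t kx = 0; kx < 5; kx++) {
          // Map output position to input position; 2 is the (assumed) padding.
          const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + ky) - 2;
          const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kx) - 2;
          // Out-of-bounds taps contribute zero (the kernels use lane masks and a zero row).
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 5 + kx];
          }
        }
      }
      acc = acc < output_min ? output_min : acc;  // vmaxq_f32(acc, vmin)
      acc = acc > output_max ? output_max : acc;  // vminq_f32(acc, vmax)
      output[oy * output_width + ox] = acc;
    }
  }
}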
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-aarch64-neonfma-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
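    // vld2q_f32 de-interleaves 8 consecutive pixels: val[0] holds the even
    // columns (named 8,A,C,E) and val[1] the odd columns (9,B,D,F).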
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
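      // Four partial accumulators (acc4): vo0p0 is seeded with the bias,
      // vo0p1-vo0p3 with multiplies; spreading the 25 taps across them
      // shortens the serial FMA dependency chain.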
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
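      // Reduce the four partials into vo0p0 before clamping.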
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p1 = vfmaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vfmaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,980 | 40.209524 | 124 | c |
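The acc suffix in these file names counts the partial accumulators used per output row. Splitting the 25 multiply-adds across several registers shortens the serial FMA dependency chain, at the cost of summing the partials once per block; the acc4 kernel above reduces them as

  vo0p0 = vaddq_f32(vo0p0, vo0p1);
  vo0p2 = vaddq_f32(vo0p2, vo0p3);
  vo0p0 = vaddq_f32(vo0p0, vo0p2);

and the acc5 variant that follows adds one more partial, vo0p4, folded in with a fourth vaddq_f32.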
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-aarch64-neonfma-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p2 = vfmaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4xACEG, vwOP, 1);
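      // Reduce the five partials into vo0p0 before clamping.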
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p2 = vfmaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vfmaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p4 = vfmaq_lane_f32(vo0p4, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p1 = vfmaq_lane_f32(vo0p1, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 13,068 | 40.227129 | 124 | c |
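The next variant drops the acc suffix: it keeps a single accumulator, so every tap chains through vo0p0 with a dependent FMA, trading instruction-level parallelism for fewer live registers and no reduction step, e.g. (pattern as in the kernel below):

  vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
  vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
  /* ...23 more chained FMAs... */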
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-aarch64-neonfma-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__aarch64_neonfma_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
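      // Single accumulator: all 25 taps chain through vo0p0 via dependent FMAs.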
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p0 = vfmaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE, vget_high_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vfmaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vfmaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,711 | 40.139159 | 124 | c |
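The neon (non-fma) kernels that follow are the same template lowered without fused multiply-add: vmlaq_lane_f32 replaces vfmaq_lane_f32, i.e. a separate multiply and add with an intermediate rounding step, so results can differ from the fma kernels in the last bit. The two flavors of the same tap:

  vo0p0 = vfmaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);  // fused (aarch64 neonfma kernels)
  vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);  // multiply, then add (neon kernels)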
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-neon-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
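      // Non-FMA NEON path: the vmlaq_lane_f32 calls below perform a separate
      // multiply and add rather than a fused multiply-add.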
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,793 | 40.138264 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-neon-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
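      // Three partial accumulators (acc3): vo0p0 seeded with the bias,
      // vo0p1/vo0p2 seeded with multiplies.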
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
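      // Outputs remaining: ceil(w / 2 floats). For example, w == 5 floats
      // (20 bytes) gives w_tmp = (20 + 4) / 8 = 3 pixels to store.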
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
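    // Each output row consumes two input rows: the new i0..i2 are the old
    // i2..i4 rewound by input_decrement (the block-aligned distance walked).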
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,881 | 40.15655 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-neon-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
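      // acc4 variant: the bias and the 25 taps are spread across four
      // independent accumulators (vo0p0..vo0p3) so that consecutive
      // multiply-accumulates need not serialize on a single register.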
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
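      // Zero the lanes that fall past the end of the row. The even/odd masks
      // are precomputed from input_width, so values read out of bounds above
      // cannot leak into the output.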
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p1 = vmlaq_lane_f32(vo0p1, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p2 = vmlaq_lane_f32(vo0p2, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,969 | 40.174603 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-neon-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
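      // acc5 variant: five independent accumulators; the remaining taps are
      // dealt out to them round-robin and summed just before the store.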
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p2 = vmlaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4xACEG, vwOP, 1);
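      // Reduce the five partial sums into vo0p0.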
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
float32x4_t vo0p1 = vmulq_lane_f32(vi0x8ACE, vget_high_f32(vw0123), 1);
float32x4_t vo0p2 = vmulq_lane_f32(vi1x8ACE, vget_low_f32(vw89AB), 0);
float32x4_t vo0p3 = vmulq_lane_f32(vi2x8ACE, vget_low_f32(vwCDEF), 1);
float32x4_t vo0p4 = vmulq_lane_f32(vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p2 = vmlaq_lane_f32(vo0p2, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p3 = vmlaq_lane_f32(vo0p3, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p4 = vmlaq_lane_f32(vo0p4, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p1 = vmlaq_lane_f32(vo0p1, vi4xACEG, vwOP, 1);
vo0p0 = vaddq_f32(vo0p0, vo0p1);
vo0p2 = vaddq_f32(vo0p2, vo0p3);
vo0p0 = vaddq_f32(vo0p0, vo0p2);
vo0p0 = vaddq_f32(vo0p0, vo0p4);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 13,057 | 40.192429 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-neon-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__neon_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const uint32x4_t vmask_even = vld1q_u32(params->neon_stride2.mask_even);
const uint32x4_t vmask_odd = vld1q_u32(params->neon_stride2.mask_odd);
const float32x4_t vmax = vld1q_dup_f32(¶ms->neon_stride2.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->neon_stride2.min);
const float32x4_t vw0123 = vld1q_f32(weights);
const float32x4_t vw4567 = vld1q_f32(weights + 4);
const float32x4_t vw89AB = vld1q_f32(weights + 8);
const float32x4_t vwCDEF = vld1q_f32(weights + 12);
const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
const float32x4_t vwKLMN = vld1q_f32(weights + 20);
const float32x2_t vwOP = vld1_f32(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float32x4_t vi0x0246 = vmovq_n_f32(0.0f);
float32x4_t vi1x0246 = vmovq_n_f32(0.0f);
float32x4_t vi2x0246 = vmovq_n_f32(0.0f);
float32x4_t vi3x0246 = vmovq_n_f32(0.0f);
float32x4_t vi4x0246 = vmovq_n_f32(0.0f);
float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
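      // Single-accumulator variant: all 25 multiply-accumulates chain through
      // vo0p0, keeping live registers to a minimum at the cost of one long
      // dependency chain.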
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[0], vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[0], vget_high_f32(vwKLMN), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE9BDF.val[1], vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE9BDF.val[1], vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE9BDF.val[0], 3);
vi0x0246 = vi0x8ACE9BDF.val[0];
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE9BDF.val[0], 3);
vi1x0246 = vi1x8ACE9BDF.val[0];
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE9BDF.val[0], 3);
vi2x0246 = vi2x8ACE9BDF.val[0];
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE9BDF.val[0], 3);
vi3x0246 = vi3x8ACE9BDF.val[0];
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE9BDF.val[0], 3);
vi4x0246 = vi4x8ACE9BDF.val[0];
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
vi0x1357 = vi0x8ACE9BDF.val[1];
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
vi1x1357 = vi1x8ACE9BDF.val[1];
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
vi2x1357 = vi2x8ACE9BDF.val[1];
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
vi3x1357 = vi3x8ACE9BDF.val[1];
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
vi4x1357 = vi4x8ACE9BDF.val[1];
const float32x4x2_t vi0xGIKMHJLN = vld2q_f32(i0); i0 += 8;
const float32x4x2_t vi1xGIKMHJLN = vld2q_f32(i1); i1 += 8;
const float32x4x2_t vi2xGIKMHJLN = vld2q_f32(i2); i2 += 8;
const float32x4x2_t vi3xGIKMHJLN = vld2q_f32(i3); i3 += 8;
const float32x4x2_t vi4xGIKMHJLN = vld2q_f32(i4); i4 += 8;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE9BDF.val[0], vi0xGIKMHJLN.val[0], 1);
vi0x8ACE9BDF = vi0xGIKMHJLN;
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE9BDF.val[0], vi1xGIKMHJLN.val[0], 1);
vi1x8ACE9BDF = vi1xGIKMHJLN;
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE9BDF.val[0], vi2xGIKMHJLN.val[0], 1);
vi2x8ACE9BDF = vi2xGIKMHJLN;
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE9BDF.val[0], vi3xGIKMHJLN.val[0], 1);
vi3x8ACE9BDF = vi3xGIKMHJLN;
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE9BDF.val[0], vi4xGIKMHJLN.val[0], 1);
vi4x8ACE9BDF = vi4xGIKMHJLN;
vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
vst1q_f32(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x8ACE, vget_high_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x8ACE, vget_low_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x8ACE, vget_high_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x8ACE, vget_high_f32(vwKLMN), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x9BDF, vget_low_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x9BDF, vget_low_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x9BDF, vget_high_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x9BDF, vget_high_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x9BDF, vwOP, 0);
const float32x4_t vi0x68AC = vextq_f32(vi0x0246, vi0x8ACE, 3);
const float32x4_t vi1x68AC = vextq_f32(vi1x0246, vi1x8ACE, 3);
const float32x4_t vi2x68AC = vextq_f32(vi2x0246, vi2x8ACE, 3);
const float32x4_t vi3x68AC = vextq_f32(vi3x0246, vi3x8ACE, 3);
const float32x4_t vi4x68AC = vextq_f32(vi4x0246, vi4x8ACE, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x68AC, vget_low_f32(vw0123), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x68AC, vget_high_f32(vw4567), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x68AC, vget_high_f32(vw89AB), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x68AC, vget_low_f32(vwGHIJ), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x68AC, vget_low_f32(vwKLMN), 1);
const float32x4_t vi0x79BD = vextq_f32(vi0x1357, vi0x9BDF, 3);
const float32x4_t vi1x79BD = vextq_f32(vi1x1357, vi1x9BDF, 3);
const float32x4_t vi2x79BD = vextq_f32(vi2x1357, vi2x9BDF, 3);
const float32x4_t vi3x79BD = vextq_f32(vi3x1357, vi3x9BDF, 3);
const float32x4_t vi4x79BD = vextq_f32(vi4x1357, vi4x9BDF, 3);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0x79BD, vget_high_f32(vw0123), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1x79BD, vget_high_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2x79BD, vget_low_f32(vwCDEF), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3x79BD, vget_low_f32(vwGHIJ), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4x79BD, vget_high_f32(vwKLMN), 0);
const float32x4_t vzero = vmovq_n_f32(0.0f);
const float32x4_t vi0xACEG = vextq_f32(vi0x8ACE, vzero, 1);
const float32x4_t vi1xACEG = vextq_f32(vi1x8ACE, vzero, 1);
const float32x4_t vi2xACEG = vextq_f32(vi2x8ACE, vzero, 1);
const float32x4_t vi3xACEG = vextq_f32(vi3x8ACE, vzero, 1);
const float32x4_t vi4xACEG = vextq_f32(vi4x8ACE, vzero, 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi0xACEG, vget_low_f32(vw4567), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi1xACEG, vget_high_f32(vw89AB), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi2xACEG, vget_high_f32(vwCDEF), 1);
vo0p0 = vmlaq_lane_f32(vo0p0, vi3xACEG, vget_low_f32(vwKLMN), 0);
vo0p0 = vmlaq_lane_f32(vo0p0, vi4xACEG, vwOP, 1);
float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
vo0 = vminq_f32(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
vst1q_f32(o0, vo0); o0 += 4;
} else {
float32x2_t vo0_lo = vget_low_f32(vo0);
if (w_tmp & 2) {
vst1_f32(o0, vo0_lo); o0 += 2;
vo0_lo = vget_high_f32(vo0);
}
if (w_tmp & 1) {
vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 12,700 | 40.10356 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-scalar-1x1-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const uint32_t padding_top_less_1 = padding_top - 1;
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
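    // Scalar sliding window: vi*x0..vi*x2 cache three of the five columns
    // each 5-tap row needs; the two newest columns are loaded per iteration.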
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
for (; w > 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x3 = i0[0];
const float vi1x3 = i1[0];
const float vi2x3 = i2[0];
const float vi3x3 = i3[0];
const float vi4x3 = i4[0];
const float vi0x4 = i0[1];
i0 += 2;
const float vi1x4 = i1[1];
i1 += 2;
const float vi2x4 = i2[1];
i2 += 2;
const float vi3x4 = i3[1];
i3 += 2;
const float vi4x4 = i4[1];
i4 += 2;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vi3x0 = vi3x2;
vi4x0 = vi4x2;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x3;
vi1x1 = vi1x3;
vi2x1 = vi2x3;
vi3x1 = vi3x3;
vi4x1 = vi4x3;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x4;
vi1x2 = vi1x4;
vi2x2 = vi2x4;
vi3x2 = vi3x4;
vi4x2 = vi4x4;
vo0p1 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo0p1 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
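    // Tail: an even input width leaves one last output whose x+2 column is
    // right padding (the vk*4 taps are skipped); an odd width leaves an
    // output with both the vk*3 and vk*4 columns padded.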
if XNN_LIKELY(w == 2 * sizeof(float)) {
const float vi0x3 = *i0++;
const float vi1x3 = *i1++;
const float vi2x3 = *i2++;
const float vi3x3 = *i3++;
const float vi4x3 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo0p1 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
} else {
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p1 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo0p0 += vo0p1;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
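    // Rotate the row pointers two input rows down (stride 2): every pointer
    // has already walked one full row, so old i2 minus input_width is the
    // row that becomes the new i0.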
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i3);
i3 = (const float*) ((uintptr_t) i4);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,538 | 24.542969 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-scalar-1x1-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
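// Note: this kernel and the acc4/acc5 siblings below differ from the acc2
// variant above only in how many independent accumulators (vo0p*) carry the
// partial products before the final reduction.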
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const uint32_t padding_top_less_1 = padding_top - 1;
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
for (; w > 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x3 = i0[0];
const float vi1x3 = i1[0];
const float vi2x3 = i2[0];
const float vi3x3 = i3[0];
const float vi4x3 = i4[0];
const float vi0x4 = i0[1];
i0 += 2;
const float vi1x4 = i1[1];
i1 += 2;
const float vi2x4 = i2[1];
i2 += 2;
const float vi3x4 = i3[1];
i3 += 2;
const float vi4x4 = i4[1];
i4 += 2;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vi3x0 = vi3x2;
vi4x0 = vi4x2;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x3;
vi1x1 = vi1x3;
vi2x1 = vi2x3;
vi3x1 = vi3x3;
vi4x1 = vi4x3;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x4;
vi1x2 = vi1x4;
vi2x2 = vi2x4;
vi3x2 = vi3x4;
vi4x2 = vi4x4;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p2 += vi0x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo0p1 += vi2x4 * vk24;
vo0p2 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
if XNN_LIKELY(w == 2 * sizeof(float)) {
const float vi0x3 = *i0++;
const float vi1x3 = *i1++;
const float vi2x3 = *i2++;
const float vi3x3 = *i3++;
const float vi4x3 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p1 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
} else {
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p1 += vi4x0 * vk40;
vo0p2 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p1 += vi2x1 * vk21;
vo0p2 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p1 += vi0x2 * vk02;
vo0p2 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i3);
i3 = (const float*) ((uintptr_t) i4);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,619 | 24.559846 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-scalar-1x1-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const uint32_t padding_top_less_1 = padding_top - 1;
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
for (; w > 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x3 = i0[0];
const float vi1x3 = i1[0];
const float vi2x3 = i2[0];
const float vi3x3 = i3[0];
const float vi4x3 = i4[0];
const float vi0x4 = i0[1];
i0 += 2;
const float vi1x4 = i1[1];
i1 += 2;
const float vi2x4 = i2[1];
i2 += 2;
const float vi3x4 = i3[1];
i3 += 2;
const float vi4x4 = i4[1];
i4 += 2;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vi3x0 = vi3x2;
vi4x0 = vi4x2;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vi0x1 = vi0x3;
vi1x1 = vi1x3;
vi2x1 = vi2x3;
vi3x1 = vi3x3;
vi4x1 = vi4x3;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vi0x2 = vi0x4;
vi1x2 = vi1x4;
vi2x2 = vi2x4;
vi3x2 = vi3x4;
vi4x2 = vi4x4;
vo0p3 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p2 += vi3x3 * vk33;
vo0p3 += vi4x3 * vk43;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p2 += vi2x4 * vk24;
vo0p3 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
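      // Reduce the four partial sums with a balanced addition tree.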
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
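    // Remainder: with two pixels left the last output omits kernel column 4 (it falls in the right padding); with one pixel left columns 3 and 4 are omitted.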
if XNN_LIKELY(w == 2 * sizeof(float)) {
const float vi0x3 = *i0++;
const float vi1x3 = *i1++;
const float vi2x3 = *i2++;
const float vi3x3 = *i3++;
const float vi4x3 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p3 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p1 += vi2x3 * vk23;
vo0p2 += vi3x3 * vk33;
vo0p3 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
} else {
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p1 += vi0x1 * vk01;
vo0p2 += vi1x1 * vk11;
vo0p3 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p1 += vi4x1 * vk41;
vo0p2 += vi0x2 * vk02;
vo0p3 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p1 += vi3x2 * vk32;
vo0p2 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
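    // The pointers finished one row past where they started, so rebasing from i2..i4 slides the 5-row window down by two rows (stride 2).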
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i3);
i3 = (const float*) ((uintptr_t) i4);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,700 | 24.576336 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-scalar-1x1-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const uint32_t padding_top_less_1 = padding_top - 1;
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
for (; w > 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x3 = i0[0];
const float vi1x3 = i1[0];
const float vi2x3 = i2[0];
const float vi3x3 = i3[0];
const float vi4x3 = i4[0];
const float vi0x4 = i0[1];
i0 += 2;
const float vi1x4 = i1[1];
i1 += 2;
const float vi2x4 = i2[1];
i2 += 2;
const float vi3x4 = i3[1];
i3 += 2;
const float vi4x4 = i4[1];
i4 += 2;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vi3x0 = vi3x2;
vi4x0 = vi4x2;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vi0x1 = vi0x3;
vi1x1 = vi1x3;
vi2x1 = vi2x3;
vi3x1 = vi3x3;
vi4x1 = vi4x3;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vi0x2 = vi0x4;
vi1x2 = vi1x4;
vi2x2 = vi2x4;
vi3x2 = vi3x4;
vi4x2 = vi4x4;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p3 += vi3x3 * vk33;
vo0p4 += vi4x3 * vk43;
vo0p0 += vi0x4 * vk04;
vo0p1 += vi1x4 * vk14;
vo0p2 += vi2x4 * vk24;
vo0p3 += vi3x4 * vk34;
vo0p4 += vi4x4 * vk44;
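      // Collapse the five partial sums; (p0+p1) and (p2+p3) can issue in parallel before the final two adds.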
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
if XNN_LIKELY(w == 2 * sizeof(float)) {
const float vi0x3 = *i0++;
const float vi1x3 = *i1++;
const float vi2x3 = *i2++;
const float vi3x3 = *i3++;
const float vi4x3 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vo0p0 += vi0x3 * vk03;
vo0p1 += vi1x3 * vk13;
vo0p2 += vi2x3 * vk23;
vo0p3 += vi3x3 * vk33;
vo0p4 += vi4x3 * vk43;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
} else {
float vo0p0 = vbias + vi0x0 * vk00;
float vo0p1 = vi1x0 * vk10;
float vo0p2 = vi2x0 * vk20;
float vo0p3 = vi3x0 * vk30;
float vo0p4 = vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p1 += vi1x1 * vk11;
vo0p2 += vi2x1 * vk21;
vo0p3 += vi3x1 * vk31;
vo0p4 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p1 += vi1x2 * vk12;
vo0p2 += vi2x2 * vk22;
vo0p3 += vi3x2 * vk32;
vo0p4 += vi4x2 * vk42;
vo0p0 += vo0p1;
vo0p2 += vo0p3;
vo0p0 += vo0p2;
vo0p0 += vo0p4;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i3);
i3 = (const float*) ((uintptr_t) i4);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,781 | 24.592453 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-scalar-1x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const float vmax = params->scalar.max;
const float vmin = params->scalar.min;
const float vbias = weights[0];
const float vk00 = weights[1];
const float vk01 = weights[2];
const float vk02 = weights[3];
const float vk03 = weights[4];
const float vk04 = weights[5];
const float vk10 = weights[6];
const float vk11 = weights[7];
const float vk12 = weights[8];
const float vk13 = weights[9];
const float vk14 = weights[10];
const float vk20 = weights[11];
const float vk21 = weights[12];
const float vk22 = weights[13];
const float vk23 = weights[14];
const float vk24 = weights[15];
const float vk30 = weights[16];
const float vk31 = weights[17];
const float vk32 = weights[18];
const float vk33 = weights[19];
const float vk34 = weights[20];
const float vk40 = weights[21];
const float vk41 = weights[22];
const float vk42 = weights[23];
const float vk43 = weights[24];
const float vk44 = weights[25];
const uint32_t padding_top_less_1 = padding_top - 1;
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
float vi0x0 = 0.0f;
float vi1x0 = 0.0f;
float vi2x0 = 0.0f;
float vi3x0 = 0.0f;
float vi4x0 = 0.0f;
float vi0x1 = 0.0f;
float vi1x1 = 0.0f;
float vi2x1 = 0.0f;
float vi3x1 = 0.0f;
float vi4x1 = 0.0f;
float vi0x2 = *i0++;
float vi1x2 = *i1++;
float vi2x2 = *i2++;
float vi3x2 = *i3++;
float vi4x2 = *i4++;
size_t w = input_width;
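    // Single-accumulator variant: every tap chains through vo0p0, trading latency for minimal register pressure.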
for (; w > 2 * sizeof(float); w -= 2 * sizeof(float)) {
const float vi0x3 = i0[0];
const float vi1x3 = i1[0];
const float vi2x3 = i2[0];
const float vi3x3 = i3[0];
const float vi4x3 = i4[0];
const float vi0x4 = i0[1];
i0 += 2;
const float vi1x4 = i1[1];
i1 += 2;
const float vi2x4 = i2[1];
i2 += 2;
const float vi3x4 = i3[1];
i3 += 2;
const float vi4x4 = i4[1];
i4 += 2;
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vi0x0 = vi0x2;
vi1x0 = vi1x2;
vi2x0 = vi2x2;
vi3x0 = vi3x2;
vi4x0 = vi4x2;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vi0x1 = vi0x3;
vi1x1 = vi1x3;
vi2x1 = vi2x3;
vi3x1 = vi3x3;
vi4x1 = vi4x3;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vi0x2 = vi0x4;
vi1x2 = vi1x4;
vi2x2 = vi2x4;
vi3x2 = vi3x4;
vi4x2 = vi4x4;
vo0p0 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p0 += vi4x3 * vk43;
vo0p0 += vi0x4 * vk04;
vo0p0 += vi1x4 * vk14;
vo0p0 += vi2x4 * vk24;
vo0p0 += vi3x4 * vk34;
vo0p0 += vi4x4 * vk44;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
if XNN_LIKELY(w == 2 * sizeof(float)) {
const float vi0x3 = *i0++;
const float vi1x3 = *i1++;
const float vi2x3 = *i2++;
const float vi3x3 = *i3++;
const float vi4x3 = *i4++;
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
vo0p0 += vi0x3 * vk03;
vo0p0 += vi1x3 * vk13;
vo0p0 += vi2x3 * vk23;
vo0p0 += vi3x3 * vk33;
vo0p0 += vi4x3 * vk43;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
} else {
float vo0p0 = vbias + vi0x0 * vk00;
vo0p0 += vi1x0 * vk10;
vo0p0 += vi2x0 * vk20;
vo0p0 += vi3x0 * vk30;
vo0p0 += vi4x0 * vk40;
vo0p0 += vi0x1 * vk01;
vo0p0 += vi1x1 * vk11;
vo0p0 += vi2x1 * vk21;
vo0p0 += vi3x1 * vk31;
vo0p0 += vi4x1 * vk41;
vo0p0 += vi0x2 * vk02;
vo0p0 += vi1x2 * vk12;
vo0p0 += vi2x2 * vk22;
vo0p0 += vi3x2 * vk32;
vo0p0 += vi4x2 * vk42;
float vo0 = math_max_f32(vo0p0, vmin);
vo0 = math_min_f32(vo0, vmax);
*o0++ = vo0;
}
i0 = (const float*) ((uintptr_t) i2 - input_width);
i1 = (const float*) ((uintptr_t) i2);
i2 = (const float*) ((uintptr_t) i3);
i3 = (const float*) ((uintptr_t) i4);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 6,452 | 24.505929 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-sse-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
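  // The even/odd masks zero out deinterleaved lanes that lie beyond the end of the row in the remainder block.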
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
__m128 vi0x6024 = _mm_setzero_ps();
__m128 vi1x6024 = _mm_setzero_ps();
__m128 vi2x6024 = _mm_setzero_ps();
__m128 vi3x6024 = _mm_setzero_ps();
__m128 vi4x6024 = _mm_setzero_ps();
__m128 vi0x7135 = _mm_setzero_ps();
__m128 vi1x7135 = _mm_setzero_ps();
__m128 vi2x7135 = _mm_setzero_ps();
__m128 vi3x7135 = _mm_setzero_ps();
__m128 vi4x7135 = _mm_setzero_ps();
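    // Vector names list hexadecimal column indices per lane: x8ACE/x9BDF are the even/odd columns of the current block, and lane 0 of x6024/x7135 carries column 6/7, the last even/odd column of the previous block.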
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
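    // De-interleave each 8-pixel block into even (8ACE) and odd (9BDF) columns; the stride-2 output centers sit on the even columns.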
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk22));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x8ACE, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x9BDF, vk43));
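      // Splice the saved column 6 into the rotated vector: 68AC holds the even columns two left of the output centers (kernel column 0 taps).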
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
vi0x6024 = vi0xE8AC;
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
vi1x6024 = vi1xE8AC;
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
vi2x6024 = vi2xE8AC;
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
vi3x6024 = vi3xE8AC;
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
vi4x6024 = vi4xE8AC;
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
vi0x7135 = vi0xF9BD;
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
vi1x7135 = vi1xF9BD;
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
vi2x7135 = vi2xF9BD;
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
vi3x7135 = vi3xF9BD;
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vi4x7135 = vi4xF9BD;
const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi0x9BDF = vi0xHJLN;
const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi1x9BDF = vi1xHJLN;
const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi2x9BDF = vi2xHJLN;
const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi3x9BDF = vi3xHJLN;
const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi4x9BDF = vi4xHJLN;
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
vi0x8ACE = vi0xGIKM;
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
vi1x8ACE = vi1xGIKM;
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
vi2x8ACE = vi2xGIKM;
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
vi3x8ACE = vi3xGIKM;
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
vi4x8ACE = vi4xGIKM;
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2xACEG, vk24));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
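      // Zero the lanes that fall beyond the end of the row; the preload may have read past it (XNN_OOB_READS), and those lanes must not contribute.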
vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk22));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x8ACE, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2xACEG, vk24));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
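      // Outputs still to store: ceil(remaining_pixels / 2), one per stride-2 step.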
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
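        // 1-3 outputs remain: store a pair, shift the high half down, then store a single lane as needed.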
if (w_tmp & 2) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w_tmp & 1) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,662 | 43.913747 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-sse-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
__m128 vi0x6024 = _mm_setzero_ps();
__m128 vi1x6024 = _mm_setzero_ps();
__m128 vi2x6024 = _mm_setzero_ps();
__m128 vi3x6024 = _mm_setzero_ps();
__m128 vi4x6024 = _mm_setzero_ps();
__m128 vi0x7135 = _mm_setzero_ps();
__m128 vi1x7135 = _mm_setzero_ps();
__m128 vi2x7135 = _mm_setzero_ps();
__m128 vi3x7135 = _mm_setzero_ps();
__m128 vi4x7135 = _mm_setzero_ps();
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
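      // acc3 variant: the 25 taps rotate round-robin across three accumulators (vo0p0..vo0p2) to shorten each dependency chain.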
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x9BDF, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
vi0x6024 = vi0xE8AC;
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
vi1x6024 = vi1xE8AC;
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
vi2x6024 = vi2xE8AC;
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
vi3x6024 = vi3xE8AC;
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
vi4x6024 = vi4xE8AC;
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x68AC, vk00));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
vi0x7135 = vi0xF9BD;
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
vi1x7135 = vi1xF9BD;
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
vi2x7135 = vi2xF9BD;
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
vi3x7135 = vi3xF9BD;
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vi4x7135 = vi4xF9BD;
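      // 79BD holds the odd columns one left of the output centers (kernel column 1 taps).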
const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi0x9BDF = vi0xHJLN;
const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi1x9BDF = vi1xHJLN;
const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi2x9BDF = vi2xHJLN;
const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi3x9BDF = vi3xHJLN;
const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi4x9BDF = vi4xHJLN;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
vi0x8ACE = vi0xGIKM;
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
vi1x8ACE = vi1xGIKM;
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
vi2x8ACE = vi2xGIKM;
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
vi3x8ACE = vi3xGIKM;
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
vi4x8ACE = vi4xGIKM;
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
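      // ACEG holds the even columns two right of the output centers (kernel column 4 taps), borrowing column G from the next block.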
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0xACEG, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2xACEG, vk24));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x9BDF, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x68AC, vk00));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0xACEG, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2xACEG, vk24));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w_tmp & 1) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,718 | 43.823056 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-sse-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
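    // For the bottom-most output rows, i3/i4 would read past the padded image; point them at the zero row instead.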
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
__m128 vi0x6024 = _mm_setzero_ps();
__m128 vi1x6024 = _mm_setzero_ps();
__m128 vi2x6024 = _mm_setzero_ps();
__m128 vi3x6024 = _mm_setzero_ps();
__m128 vi4x6024 = _mm_setzero_ps();
__m128 vi0x7135 = _mm_setzero_ps();
__m128 vi1x7135 = _mm_setzero_ps();
__m128 vi2x7135 = _mm_setzero_ps();
__m128 vi3x7135 = _mm_setzero_ps();
__m128 vi4x7135 = _mm_setzero_ps();
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x8ACE, vk32);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk03));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x9BDF, vk13));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
vi0x6024 = vi0xE8AC;
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
vi1x6024 = vi1xE8AC;
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
vi2x6024 = vi2xE8AC;
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
vi3x6024 = vi3xE8AC;
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
vi4x6024 = vi4xE8AC;
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x68AC, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
vi0x7135 = vi0xF9BD;
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
vi1x7135 = vi1xF9BD;
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
vi2x7135 = vi2xF9BD;
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
vi3x7135 = vi3xF9BD;
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vi4x7135 = vi4xF9BD;
const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi0x9BDF = vi0xHJLN;
const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi1x9BDF = vi1xHJLN;
const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi2x9BDF = vi2xHJLN;
const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi3x9BDF = vi3xHJLN;
const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi4x9BDF = vi4xHJLN;
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x79BD, vk21));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x79BD, vk31));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
vi0x8ACE = vi0xGIKM;
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
vi1x8ACE = vi1xGIKM;
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
vi2x8ACE = vi2xGIKM;
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
vi3x8ACE = vi3xGIKM;
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
vi4x8ACE = vi4xGIKM;
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2xACEG, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x8ACE, vk32);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x9BDF, vk03));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x9BDF, vk13));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x68AC, vk00));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x79BD, vk21));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x79BD, vk31));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2xACEG, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
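      // w counts the remaining *input* floats (1-8); with stride 2 that is
      // ceil(w_pixels / 2) = 1-4 output pixels, computed below as w_tmp.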
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w_tmp & 1) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
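
// All of the 5x5s2p2 SSE kernels in this group start from the same idiom: two
// consecutive 4-float loads are split into even- and odd-indexed pixels with a
// pair of _mm_shuffle_ps calls. The following standalone sketch (a hypothetical
// demo main, not part of XNNPACK) shows just that deinterleave on SSE1:
#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const float row[8] = {8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f};
  const __m128 vlo = _mm_loadu_ps(row);      // pixels 8 9 A B
  const __m128 vhi = _mm_loadu_ps(row + 4);  // pixels C D E F
  // _MM_SHUFFLE(2, 0, 2, 0) picks lanes {0, 2} of each operand, giving the
  // even pixels 8 A C E; _MM_SHUFFLE(3, 1, 3, 1) gives the odd pixels 9 B D F.
  const __m128 veven = _mm_shuffle_ps(vlo, vhi, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 vodd = _mm_shuffle_ps(vlo, vhi, _MM_SHUFFLE(3, 1, 3, 1));
  float even[4], odd[4];
  _mm_storeu_ps(even, veven);
  _mm_storeu_ps(odd, vodd);
  printf("even: %g %g %g %g\n", even[0], even[1], even[2], even[3]);
  printf("odd:  %g %g %g %g\n", odd[0], odd[1], odd[2], odd[3]);
  return 0;
}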
// ----------------------------------------------------------------------------
// XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-sse-1x4-acc5.c
// ----------------------------------------------------------------------------
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
__m128 vi0x6024 = _mm_setzero_ps();
__m128 vi1x6024 = _mm_setzero_ps();
__m128 vi2x6024 = _mm_setzero_ps();
__m128 vi3x6024 = _mm_setzero_ps();
__m128 vi4x6024 = _mm_setzero_ps();
__m128 vi0x7135 = _mm_setzero_ps();
__m128 vi1x7135 = _mm_setzero_ps();
__m128 vi2x7135 = _mm_setzero_ps();
__m128 vi3x7135 = _mm_setzero_ps();
__m128 vi4x7135 = _mm_setzero_ps();
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x8ACE, vk32);
__m128 vo0p4 = _mm_mul_ps(vi4x8ACE, vk42);
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk23));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x9BDF, vk33));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
vi0x6024 = vi0xE8AC;
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
vi1x6024 = vi1xE8AC;
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
vi2x6024 = vi2xE8AC;
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
vi3x6024 = vi3xE8AC;
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
vi4x6024 = vi4xE8AC;
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x68AC, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x68AC, vk20));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x68AC, vk30));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
vi0x7135 = vi0xF9BD;
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
vi1x7135 = vi1xF9BD;
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
vi2x7135 = vi2xF9BD;
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
vi3x7135 = vi3xF9BD;
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vi4x7135 = vi4xF9BD;
const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi0x9BDF = vi0xHJLN;
const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi1x9BDF = vi1xHJLN;
const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi2x9BDF = vi2xHJLN;
const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi3x9BDF = vi3xHJLN;
const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi4x9BDF = vi4xHJLN;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x79BD, vk31));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
vi0x8ACE = vi0xGIKM;
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
vi1x8ACE = vi1xGIKM;
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
vi2x8ACE = vi2xGIKM;
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
vi3x8ACE = vi3xGIKM;
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
vi4x8ACE = vi4xGIKM;
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2xACEG, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3xACEG, vk34));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4xACEG, vk44));
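      // Pairwise reduction of the five partial sums: two independent adds,
      // then two dependent ones, instead of four serial additions.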
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
vo0p0 = _mm_add_ps(vo0p0, vo0p4);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
__m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
__m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
__m128 vo0p3 = _mm_mul_ps(vi3x8ACE, vk32);
__m128 vo0p4 = _mm_mul_ps(vi4x8ACE, vk42);
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk03));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x9BDF, vk13));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x9BDF, vk23));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x9BDF, vk33));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x68AC, vk10));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x68AC, vk20));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x68AC, vk30));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3x79BD, vk31));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1xACEG, vk14));
vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2xACEG, vk24));
vo0p3 = _mm_add_ps(vo0p3, _mm_mul_ps(vi3xACEG, vk34));
vo0p4 = _mm_add_ps(vo0p4, _mm_mul_ps(vi4xACEG, vk44));
vo0p0 = _mm_add_ps(vo0p0, vo0p1);
vo0p2 = _mm_add_ps(vo0p2, vo0p3);
vo0p0 = _mm_add_ps(vo0p0, vo0p2);
vo0p0 = _mm_add_ps(vo0p0, vo0p4);
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w_tmp & 1) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
// ----------------------------------------------------------------------------
// XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-sse-1x4.c
// ----------------------------------------------------------------------------
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const __m128 vmask_even = _mm_load_ps((const float*) params->sse_stride2.mask_even);
const __m128 vmask_odd = _mm_load_ps((const float*) params->sse_stride2.mask_odd);
const __m128 vmax = _mm_load_ps(params->sse_stride2.max);
const __m128 vmin = _mm_load_ps(params->sse_stride2.min);
const __m128 vbias = _mm_load1_ps(weights);
const __m128 vk00 = _mm_load1_ps(weights + 1);
const __m128 vk01 = _mm_load1_ps(weights + 2);
const __m128 vk02 = _mm_load1_ps(weights + 3);
const __m128 vk03 = _mm_load1_ps(weights + 4);
const __m128 vk04 = _mm_load1_ps(weights + 5);
const __m128 vk10 = _mm_load1_ps(weights + 6);
const __m128 vk11 = _mm_load1_ps(weights + 7);
const __m128 vk12 = _mm_load1_ps(weights + 8);
const __m128 vk13 = _mm_load1_ps(weights + 9);
const __m128 vk14 = _mm_load1_ps(weights + 10);
const __m128 vk20 = _mm_load1_ps(weights + 11);
const __m128 vk21 = _mm_load1_ps(weights + 12);
const __m128 vk22 = _mm_load1_ps(weights + 13);
const __m128 vk23 = _mm_load1_ps(weights + 14);
const __m128 vk24 = _mm_load1_ps(weights + 15);
const __m128 vk30 = _mm_load1_ps(weights + 16);
const __m128 vk31 = _mm_load1_ps(weights + 17);
const __m128 vk32 = _mm_load1_ps(weights + 18);
const __m128 vk33 = _mm_load1_ps(weights + 19);
const __m128 vk34 = _mm_load1_ps(weights + 20);
const __m128 vk40 = _mm_load1_ps(weights + 21);
const __m128 vk41 = _mm_load1_ps(weights + 22);
const __m128 vk42 = _mm_load1_ps(weights + 23);
const __m128 vk43 = _mm_load1_ps(weights + 24);
const __m128 vk44 = _mm_load1_ps(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
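  // Branchless row setup: (-padding_top_less_1) & input_width evaluates to
  // input_width when padding_top == 2 and to 0 when padding_top == 1, so
  // i2 = i1 + input_width always lands on the correct row; i1 itself is
  // redirected to the zero row below whenever it would point above the input.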
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
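  // Standard strided-convolution output size, floor((padded - 5) / 2) + 1,
  // folded into a single expression.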
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
__m128 vi0x6024 = _mm_setzero_ps();
__m128 vi1x6024 = _mm_setzero_ps();
__m128 vi2x6024 = _mm_setzero_ps();
__m128 vi3x6024 = _mm_setzero_ps();
__m128 vi4x6024 = _mm_setzero_ps();
__m128 vi0x7135 = _mm_setzero_ps();
__m128 vi1x7135 = _mm_setzero_ps();
__m128 vi2x7135 = _mm_setzero_ps();
__m128 vi3x7135 = _mm_setzero_ps();
__m128 vi4x7135 = _mm_setzero_ps();
const __m128 vi0x89AB = _mm_loadu_ps(i0);
const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1x89AB = _mm_loadu_ps(i1);
const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2x89AB = _mm_loadu_ps(i2);
const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3x89AB = _mm_loadu_ps(i3);
const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4x89AB = _mm_loadu_ps(i4);
const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
i4 += 8;
__m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
__m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
__m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk22));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
vi0x6024 = vi0xE8AC;
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
vi1x6024 = vi1xE8AC;
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
vi2x6024 = vi2xE8AC;
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
vi3x6024 = vi3xE8AC;
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
vi4x6024 = vi4xE8AC;
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x68AC, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
i3 += 8;
const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
i4 += 8;
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
vi0x7135 = vi0xF9BD;
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
vi1x7135 = vi1xF9BD;
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
vi2x7135 = vi2xF9BD;
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
vi3x7135 = vi3xF9BD;
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vi4x7135 = vi4xF9BD;
const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi0x9BDF = vi0xHJLN;
const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi1x9BDF = vi1xHJLN;
const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi2x9BDF = vi2xHJLN;
const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi3x9BDF = vi3xHJLN;
const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
vi4x9BDF = vi4xHJLN;
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
vi0x8ACE = vi0xGIKM;
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
vi1x8ACE = vi1xGIKM;
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
vi2x8ACE = vi2xGIKM;
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
vi3x8ACE = vi3xGIKM;
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
vi4x8ACE = vi4xGIKM;
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2xACEG, vk24));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
_mm_storeu_ps(o0, vo0);
o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);
__m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x8ACE, vk12));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x8ACE, vk22));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x8ACE, vk42));
const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x9BDF, vk03));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x9BDF, vk23));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x9BDF, vk33));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));
const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x68AC, vk00));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x68AC, vk10));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x68AC, vk30));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x68AC, vk40));
const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x79BD, vk11));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x79BD, vk21));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x79BD, vk41));
const __m128 vzero = _mm_setzero_ps();
const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);
const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0xACEG, vk04));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2xACEG, vk24));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3xACEG, vk34));
vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));
__m128 vo0 = _mm_max_ps(vo0p0, vmin);
vo0 = _mm_min_ps(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
_mm_storeu_ps(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
_mm_storel_pi((__m64*) o0, vo0);
o0 += 2;
vo0 = _mm_movehl_ps(vo0, vo0);
}
if (w_tmp & 1) {
_mm_store_ss(o0, vo0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
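
// The only difference between this 1x4 kernel and the acc5 variant above is
// the accumulation schedule: acc5 spreads the 25 multiply-adds over five
// independent accumulators and reduces them with a short add tree, instead of
// one serial chain of addps. A minimal sketch of that idea, using two
// accumulators and a hypothetical helper name that is not part of XNNPACK:
#include <xmmintrin.h>

// Sums n element-wise products a[i] * b[i] using two independent accumulators
// so that back-to-back _mm_add_ps results do not depend on each other.
__m128 dot_acc2(const __m128* a, const __m128* b, int n) {
  __m128 acc0 = _mm_setzero_ps();
  __m128 acc1 = _mm_setzero_ps();
  int i = 0;
  for (; i + 1 < n; i += 2) {
    acc0 = _mm_add_ps(acc0, _mm_mul_ps(a[i], b[i]));
    acc1 = _mm_add_ps(acc1, _mm_mul_ps(a[i + 1], b[i + 1]));
  }
  if (i < n) {
    acc0 = _mm_add_ps(acc0, _mm_mul_ps(a[i], b[i]));
  }
  return _mm_add_ps(acc0, acc1);  // final reduction, as in the acc kernels
}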
// ----------------------------------------------------------------------------
// XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
// ----------------------------------------------------------------------------
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
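      // acc2: the multiply-adds below alternate between vo0p0 and vo0p1 so
      // consecutive f32x4 additions are independent; the two partial sums are
      // combined once at the end of the iteration.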
v128_t vo0p0 = vbias;
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
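
// Where the SSE kernels need a rotate (_mm_shuffle_ps) plus _mm_move_ss to
// splice the last pixel of the previous block in front of the current even
// pixels, WASM SIMD does it with one generic shuffle: lane indices 0-3 pick
// from the first operand and 4-7 from the second. A sketch (assumes a wasm32
// target with SIMD128 enabled; the helper name is hypothetical):
#include <wasm_simd128.h>

// Forms {prev[3], cur[0], cur[1], cur[2]}, i.e. vi*x68AC from vi*x0246 and
// vi*x8ACE in the kernels above and below.
v128_t splice_left1(v128_t prev, v128_t cur) {
  return wasm_v32x4_shuffle(prev, cur, 3, 4, 5, 6);
}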
// ----------------------------------------------------------------------------
// XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4.c
// ----------------------------------------------------------------------------
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
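      // The loads above may have read past the end of the row (the kernel is
      // declared XNN_OOB_READS), so mask off the out-of-bounds lanes before
      // they can reach the accumulator.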
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
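      // w is a byte count; each output pixel consumes two input floats
      // (stride 2), so this rounds up to the number of output pixels left to store.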
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
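    // Advance two input rows per output row (stride 2): input_decrement rewinds
    // each pointer to the start of its row, so the new i0 is the row i2 covered.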
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 15,290 | 39.667553 | 96 | c | XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
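  // Output rows follow the usual convolution size formula,
  // out = (padded_in - kernel) / stride + 1, written below as (padded_in - 5 + 2) / 2.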
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
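    // De-interleave each 8-wide row load into even columns (x8ACE) and odd
    // columns (x9BDF); the stride-2 taps consume them as two separate streams.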
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
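    // Main loop: 8 input pixels (4 output pixels) per iteration. This acc3
    // variant spreads partial sums over three accumulators (vo0p0..vo0p2),
    // presumably to shorten the floating-point dependency chain.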
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 15,407 | 39.547368 | 96 | c | XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
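      // Pairwise-reduce the four partial accumulators into vo0p0 before clamping.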
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
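      // No further input is loaded in the remainder, so the left-shifted window
      // (columns at offset +2) is completed with zeros, matching the implicit
      // right padding of 2.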
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 15,463 | 39.481675 | 96 | c | XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-loadsplat-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_loadsplat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 15,519 | 39.416667 | 96 | c | XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
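  // "splat" variant: the bias and 25 taps stay packed in seven vectors and
  // individual taps are broadcast on demand via wasm_v32x4_shuffle, rather
  // than being pre-splatted into 26 registers as in the loadsplat variant.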
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
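  // Top-padding setup: with padding_top == 2, i1 is redirected to the zero row
  // and i2 lands on the first input row; with padding_top == 1, i1 itself is
  // the first input row.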
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
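      // w is the number of remaining input bytes (1-8 floats); at horizontal
      // stride 2 that leaves ceil(floats / 2) = (floats + 1) / 2 output pixels.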
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
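    // Advance every row pointer by two rows (the vertical stride);
    // input_decrement rewinds exactly the bytes consumed from each row above.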
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,346 | 44.534819 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
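  // Standard convolution arithmetic: output_height = (padded_height - 5) / 2 + 1,
  // with the + 1 folded into the numerator of the division below.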
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
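      // Build the even columns shifted left by one tap: lane 3 of the previous
      // block (column 6) is concatenated with columns 8, A, C of this block.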
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,402 | 44.437673 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
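  // Branchless top-padding setup: when padding_top == 2, i1 initially points one
  // row before the input so that i2 = i1 + input_width lands on the first real
  // row; i1 itself is then redirected to the zero row below.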
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
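      // Reduce the four partial accumulators pairwise (p0+p1, p2+p3) to keep
      // the final summation tree shallow.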
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,458 | 44.341598 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-splat-1x4-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
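      // acc5 variant: the multiply-adds are spread across five independent
      // accumulators (vo0p0-vo0p4), presumably to shorten the floating-point
      // dependency chains; they are reduced to a single sum at the block's end.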
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
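      // Beyond the (already masked) row end the data is zero, so shifting a
      // zero in from the right yields columns A, C, E plus a zero standing in
      // for column G.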
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,514 | 44.246575 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-arm-splat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
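  // The 26 packed weights hold the bias in lane 0 of vw0123 followed by the 25
  // taps of the 5x5 kernel in row-major order; vwOP splats the last two taps.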
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
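      // Single-accumulator variant: every tap's product is added into vo0p0 in
      // sequence, so each f32x4_add depends on the previous one; the accN
      // siblings of this generated kernel split that chain across accumulators.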
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
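      // Zero the columns that lie past the end of the row so they contribute
      // nothing to the remaining outputs.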
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
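      // No columns remain to the right, so shift zeros in instead of the next
      // block's even columns.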
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
vo0 = wasm_f32x4_min(vo0, vmax);
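      // w_tmp = ceil(remaining input columns / 2) = number of outputs left to store.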
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
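    // Advance two input rows per output row (stride 2): the old i2/i3/i4
    // become the new i0/i1/i2, rewound by the columns consumed in this row.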
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
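  // The packed weights are the bias followed by the 25 kernel taps in
  // row-major order (vk00..vk44), each broadcast to all four lanes.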
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
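  // i0 always reads the zero row (top padding); i1 is the first row of real
  // input when padding_top == 1 and is redirected to the zero row when
  // padding_top == 2.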
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
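  // Standard stride-2 output-size arithmetic: with a 5-tap kernel,
  // output_height = (padded_input_height - 5) / 2 + 1.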
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
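    // The shifted-window registers start as zeros: the first two even/odd
    // columns of every row are the implicit left padding.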
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
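      // acc2 variant: vo0p0 is seeded with the bias while vo0p1 collects the
      // other half of the 25 taps; the two partial sums are combined once
      // before the clamp.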
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
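      // Clamp with pseudo-min/max, whose operand semantics typically lower to
      // single x86 MINPS/MAXPS instructions (hence the _x86_ kernel suffix).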
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
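      // Mask out the columns past the end of the row before the final
      // multiply-adds.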
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
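      // acc3 variant: the multiply-adds are distributed round-robin across
      // three accumulators that are reduced after the last tap.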
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
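      // Reduce the three partial sums into vo0p0 before clamping.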
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
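      // acc4 variant: four accumulators, combined by the pairwise reduction
      // ((p0+p1) + (p2+p3)) after the last tap.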
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
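      // Pairwise tree reduction of the four partial sums.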
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4-acc5.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
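      // acc5 variant: five accumulators; four are reduced pairwise and the
      // fifth is added last.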
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
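      // The shifted x68AC/x79BD vectors splice the last column of the previous
      // block with the first three of the current one, so overlapping pixels
      // are never reloaded from memory.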
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
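      // Zero the lanes that lie past the end of the row so that the
      // out-of-bounds values read above cannot leak into the output.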
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, vk32);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
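      // There is no next block to splice from in the final iteration, so the
      // shifted xACEG columns are completed with zeros instead.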
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, vk44));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
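      // With stride 2, the w remaining input bytes still map to
      // ceil(w / (2 * sizeof(float))) output pixels.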
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 15,523 | 39.427083 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-loadsplat-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vbias = wasm_v128_load32_splat(weights);
const v128_t vk00 = wasm_v128_load32_splat(weights + 1);
const v128_t vk01 = wasm_v128_load32_splat(weights + 2);
const v128_t vk02 = wasm_v128_load32_splat(weights + 3);
const v128_t vk03 = wasm_v128_load32_splat(weights + 4);
const v128_t vk04 = wasm_v128_load32_splat(weights + 5);
const v128_t vk10 = wasm_v128_load32_splat(weights + 6);
const v128_t vk11 = wasm_v128_load32_splat(weights + 7);
const v128_t vk12 = wasm_v128_load32_splat(weights + 8);
const v128_t vk13 = wasm_v128_load32_splat(weights + 9);
const v128_t vk14 = wasm_v128_load32_splat(weights + 10);
const v128_t vk20 = wasm_v128_load32_splat(weights + 11);
const v128_t vk21 = wasm_v128_load32_splat(weights + 12);
const v128_t vk22 = wasm_v128_load32_splat(weights + 13);
const v128_t vk23 = wasm_v128_load32_splat(weights + 14);
const v128_t vk24 = wasm_v128_load32_splat(weights + 15);
const v128_t vk30 = wasm_v128_load32_splat(weights + 16);
const v128_t vk31 = wasm_v128_load32_splat(weights + 17);
const v128_t vk32 = wasm_v128_load32_splat(weights + 18);
const v128_t vk33 = wasm_v128_load32_splat(weights + 19);
const v128_t vk34 = wasm_v128_load32_splat(weights + 20);
const v128_t vk40 = wasm_v128_load32_splat(weights + 21);
const v128_t vk41 = wasm_v128_load32_splat(weights + 22);
const v128_t vk42 = wasm_v128_load32_splat(weights + 23);
const v128_t vk43 = wasm_v128_load32_splat(weights + 24);
const v128_t vk44 = wasm_v128_load32_splat(weights + 25);
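  // The "loadsplat" variant broadcasts every tap into its own register up
  // front: weights[0] is the per-channel bias and weights[1..25] hold the
  // 5x5 kernel in row-major order (vkRC = row R, column C).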
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
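  // i2 is derived from i1 before the override so that it lands on the first
  // input row when padding_top == 2; i1 itself is then redirected to the
  // zero row. For padding_top == 1, i1 starts directly at the input.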
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = vbias;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = vbias;
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk02));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, vk32));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk03));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk23));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, vk43));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, vk10));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, vk30));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, vk01));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, vk21));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, vk41));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, vk14));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, vk34));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0); o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
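/*
 * A minimal scalar sketch of what the 5x5, stride-2, padding-2 CHW kernels
 * above compute for a single channel. This is an illustration, not part of
 * XNNPACK: the function name is hypothetical, input_width is taken in
 * elements rather than bytes, and the bias-first weight layout mirrors the
 * vector code above.
 */
#include <math.h>
#include <stddef.h>

static void dwconv2d_chw_5x5s2p2_ref(
    size_t input_height, size_t input_width,
    const float* input, const float* weights,  /* weights[0] = bias, then 25 taps */
    float* output, float output_min, float output_max)
{
  const size_t output_height = (input_height + 4 - 5) / 2 + 1;
  const size_t output_width = (input_width + 4 - 5) / 2 + 1;
  for (size_t oy = 0; oy < output_height; oy++) {
    for (size_t ox = 0; ox < output_width; ox++) {
      float acc = weights[0];  /* bias */
      for (size_t ky = 0; ky < 5; ky++) {
        for (size_t kx = 0; kx < 5; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) (oy * 2 + ky) - 2;  /* padding_top */
          const ptrdiff_t ix = (ptrdiff_t) (ox * 2 + kx) - 2;  /* padding_left */
          if (iy >= 0 && iy < (ptrdiff_t) input_height &&
              ix >= 0 && ix < (ptrdiff_t) input_width) {
            acc += input[(size_t) iy * input_width + (size_t) ix] * weights[1 + ky * 5 + kx];
          }
        }
      }
      acc = fmaxf(acc, output_min);  /* clamp in the same order as the kernels */
      acc = fminf(acc, output_max);
      output[oy * output_width + ox] = acc;
    }
  }
}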
| 15,294 | 39.678191 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc2(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
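  // The "splat" variant keeps the 26 weights packed in seven vector registers
  // and broadcasts individual taps on demand with wasm_v32x4_shuffle, trading
  // shuffle work for lower register pressure than the loadsplat variant.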
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
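      // acc2: multiply-adds alternate between two accumulators (vo0p0 and
      // vo0p1) to shorten the dependency chain; they are summed once at the
      // end of the block before clamping.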
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,350 | 44.545961 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc3(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
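      // acc3: fold the three partial accumulators back into one before
      // clamping; the extra accumulators exist only to expose
      // instruction-level parallelism.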
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
// Last block has 1-8 pixels to process.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,406 | 44.448753 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
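      // acc4: four independent accumulators (vo0p0..vo0p3) are fed
      // round-robin by the multiply-add chain below.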
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
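      // Shift in the previous block's trailing columns to form the taps at
      // column offsets -2 (x68AC) and -1 (x79BD).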
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
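      // Load the next 8 input columns and deinterleave them into even (GIKM)
      // and odd (HJLN) lanes.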
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
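      // Splice the current even columns with the next block's to form the
      // +2 column taps (xACEG), then rotate the column buffers forward.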
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
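      // Reduce the four partial sums and clamp to the [min, max] output range.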
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Last block has 1-8 input pixels to process, producing 1-4 output pixels.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
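      // Mask off columns past the end of the row so they contribute zero.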
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
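      // No columns remain beyond the row end: use zeros for the +2 column taps.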
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
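      // Remaining output pixels: one per two input pixels, rounded up.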
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
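    // Slide the window down two input rows (stride 2): the old i2..i4 become
    // the new i0..i2, rewound to the start of their rows.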
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,462 | 44.352617 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-splat-1x4-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4_acc5(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
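  // padding_top is 1 or 2. i0 always reads the zero row; when padding_top is
  // 2, i1 reads the zero row as well and i2 starts at the first input row.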
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
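    // Substitute the zero row for input rows that fall into the bottom padding.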
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
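      // Bias plus 25 taps are distributed over five accumulators
      // (vo0p0..vo0p4) to shorten the floating-point add dependency chain.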
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
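      // Tree-reduce the five partial sums into vo0p0.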
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Last block has 1-8 input pixels to process, producing 1-4 output pixels.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));
v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));
v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));
v128_t vo0p4 = wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,518 | 44.257534 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv2d-chw/gen/f32-dwconv2d-chw-5x5s2p2-minmax-wasmsimd-x86-splat-1x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_splat_1x4(
size_t input_height,
size_t input_width,
const float* input,
const float* weights,
const float* zero,
float* output,
uint32_t padding_top,
const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_height != 0);
assert(input_width != 0);
assert(input_width % sizeof(float) == 0);
assert(padding_top >= 1);
assert(padding_top <= 2);
const v128_t vmask_even = wasm_v128_load(params->wasmsimd_stride2.mask_even);
const v128_t vmask_odd = wasm_v128_load(params->wasmsimd_stride2.mask_odd);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd_stride2.max);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd_stride2.min);
const v128_t vw0123 = wasm_v128_load(weights);
const v128_t vw4567 = wasm_v128_load(weights + 4);
const v128_t vw89AB = wasm_v128_load(weights + 8);
const v128_t vwCDEF = wasm_v128_load(weights + 12);
const v128_t vwGHIJ = wasm_v128_load(weights + 16);
const v128_t vwKLMN = wasm_v128_load(weights + 20);
const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
const uint32_t padding_top_less_1 = padding_top - 1;
const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));
const float* i0 = zero;
const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
i1 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
float* o0 = output;
size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
do {
if XNN_UNPREDICTABLE(padded_input_height < 6) {
i3 = zero;
}
if XNN_UNPREDICTABLE(padded_input_height < 7) {
i4 = zero;
}
v128_t vi0x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x0246 = wasm_f32x4_const_splat(0.0f);
v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
const v128_t vi0x89AB = wasm_v128_load(i0);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x89AB = wasm_v128_load(i1);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x89AB = wasm_v128_load(i2);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x89AB = wasm_v128_load(i3);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4x89AB = wasm_v128_load(i4);
const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
i4 += 8;
v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
size_t w = input_width;
for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
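      // Single-accumulator variant: the bias and all 25 taps accumulate
      // serially into vo0p0.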
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
vi0x0246 = vi0x8ACE;
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
vi1x0246 = vi1x8ACE;
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
vi2x0246 = vi2x8ACE;
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
vi3x0246 = vi3x8ACE;
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vi4x0246 = vi4x8ACE;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
vi0x1357 = vi0x9BDF;
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
vi1x1357 = vi1x9BDF;
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
vi2x1357 = vi2x9BDF;
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
vi3x1357 = vi3x9BDF;
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vi4x1357 = vi4x9BDF;
const v128_t vi0xGHIJ = wasm_v128_load(i0);
const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1xGHIJ = wasm_v128_load(i1);
const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2xGHIJ = wasm_v128_load(i2);
const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3xGHIJ = wasm_v128_load(i3);
const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
i3 += 8;
const v128_t vi4xGHIJ = wasm_v128_load(i4);
const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
i4 += 8;
const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
vi0x8ACE = vi0xGIKM;
vi0x9BDF = vi0xHJLN;
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
vi1x8ACE = vi1xGIKM;
vi1x9BDF = vi1xHJLN;
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
vi2x8ACE = vi2xGIKM;
vi2x9BDF = vi2xHJLN;
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
vi3x8ACE = vi3xGIKM;
vi3x9BDF = vi3xHJLN;
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
vi4x8ACE = vi4xGIKM;
vi4x9BDF = vi4xHJLN;
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
wasm_v128_store(o0, vo0); o0 += 4;
}
    // Last block has 1-8 input pixels to process, producing 1-4 output pixels.
assert(w <= 8 * sizeof(float));
assert(w >= 1 * sizeof(float));
{
v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
vo0 = wasm_f32x4_pmin(vmax, vo0);
size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
if XNN_LIKELY(w_tmp >= 4) {
wasm_v128_store(o0, vo0);
o0 += 4;
} else {
if (w_tmp & 2) {
wasm_v128_store64_lane(o0, vo0, 0);
o0 += 2;
vo0 = wasm_v64x2_shuffle(vo0, vo0, 1, 1);
}
if (w_tmp & 1) {
wasm_v128_store32_lane(o0, vo0, 0);
o0 += 1;
}
}
}
i0 = (const float*) ((uintptr_t) i2 - input_decrement);
i1 = (const float*) ((uintptr_t) i3 - input_decrement);
i2 = (const float*) ((uintptr_t) i4 - input_decrement);
i3 = (const float*) ((uintptr_t) i2 + input_width);
i4 = (const float*) ((uintptr_t) i3 + input_width);
output_height -= 1;
padded_input_height -= 2;
} while (output_height != 0);
}
| 16,289 | 44.630252 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
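  // The conversion scales each magnitude up then back down so the mantissa is
  // rounded at half precision (overflow saturates to infinity), re-adds the
  // f16 biased exponent, then extracts the exponent/mantissa fields; NaN
  // lanes are patched with the canonical f16 NaN at the end.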
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
input += 16;
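    // Split each input into magnitude and sign; the sign bits are packed to
    // 16 bits and reattached after the magnitude is converted.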
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
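    // Recover the f16 exponent (bits 13 and up) and mantissa (low bits) of
    // each rescaled value.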
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
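    // Replace lanes flagged as NaN with the canonical f16 NaN pattern, then
    // restore the signs.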
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
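    // Tail of 1-7 elements: the loads may read past the end of the input (the
    // kernel is marked XNN_OOB_READS); only the remaining elements are stored.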
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
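    // Store 4, 2, then 1 converted halves, shifting consumed lanes out of the
    // register as they are written.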
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 10,601 | 42.097561 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
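  // Main loop: convert 24 floats per iteration using six 4-wide registers;
  // the algorithm matches the 16-element variant above.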
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vabsh2 = _mm_blendv_epi8(vnonsignh2, vnanh, vnanmaskh2);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
const __m128i vh2 = _mm_or_si128(vabsh2, vsignh2);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
o += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
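    // 1-7 elements remain. The 4-wide loads below may read past the end of the input
    // (permitted for kernels declared XNN_OOB_READS); vx_hi aliases vx_lo when fewer
    // than 4 elements are left, and only valid lanes are stored.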
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 12648 | 44.175 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
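  // Bit-manipulation f32->f16 conversion (no hardware f16 support assumed): the
  // magnitude is rounded via the scale_to_inf/scale_to_zero multiply pair plus a
  // bias addition, the half exponent and mantissa are extracted with shifts and
  // masks, and NaNs and the sign bit are patched in at the end. Despite the file
  // name, this AVX variant of the SSE template uses 128-bit SSE4.1 intrinsics,
  // presumably relying on the compiler to emit their VEX-encoded forms.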
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
const __m128 vx6 = _mm_loadu_ps(input + 24);
const __m128 vx7 = _mm_loadu_ps(input + 28);
input += 32;
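    // Split each input into magnitude (vabsx) and sign (vsignx = vx ^ vabsx, i.e.
    // the sign bit alone); the sign is OR-ed back in after the conversion.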
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vabsx6 = _mm_and_ps(vx6, vnonsign_mask);
const __m128 vabsx7 = _mm_and_ps(vx7, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
const __m128 vsignx6 = _mm_xor_ps(vx6, vabsx6);
const __m128 vsignx7 = _mm_xor_ps(vx7, vabsx7);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128i vbias6 = _mm_add_epi32(_mm_castps_si128(vabsx6), vexp_bias);
__m128i vbias7 = _mm_add_epi32(_mm_castps_si128(vabsx7), vexp_bias);
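    // scale_to_inf saturates magnitudes beyond the f16 range to +inf;
    // the scale_to_zero multiply below rescales finite values back down.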
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
__m128 vf6 = _mm_mul_ps(vabsx6, vscale_to_inf);
__m128 vf7 = _mm_mul_ps(vabsx7, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
const __m128i vnanmaskw6 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx6), vexpw_max);
const __m128i vnanmaskw7 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx7), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vbias6 = _mm_and_si128(vbias6, vexpw_max);
vbias7 = _mm_and_si128(vbias7, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
vf6 = _mm_mul_ps(vf6, vscale_to_zero);
vf7 = _mm_mul_ps(vf7, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vnanmaskh3 = _mm_packs_epi32(vnanmaskw6, vnanmaskw7);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
const __m128i vsignh3 = _mm_packs_epi32(_mm_castps_si128(vsignx6), _mm_castps_si128(vsignx7));
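    // Clamp the bias from below (bias_min), which appears to keep the alignment
    // addend in range for inputs that map to subnormal half-precision values.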
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
vbias6 = _mm_max_epi16(vbias6, vbias_min);
vbias7 = _mm_max_epi16(vbias7, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
vf6 = _mm_add_ps(vf6, _mm_castsi128_ps(vbias6));
vf7 = _mm_add_ps(vf7, _mm_castsi128_ps(vbias7));
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
__m128i vexpw6 = _mm_srli_epi32(_mm_castps_si128(vf6), 13);
__m128i vexpw7 = _mm_srli_epi32(_mm_castps_si128(vf7), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
const __m128i vmantw6 = _mm_and_si128(_mm_castps_si128(vf6), vmanth_mask);
const __m128i vmantw7 = _mm_and_si128(_mm_castps_si128(vf7), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
vexpw6 = _mm_and_si128(vexpw6, vexph_mask);
vexpw7 = _mm_and_si128(vexpw7, vexph_mask);
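    // mant + exp is an addition rather than an OR so that a mantissa which rounded
    // up past its top bit carries into, and correctly increments, the exponent field.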
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignw6 = _mm_add_epi32(vmantw6, vexpw6);
const __m128i vnonsignw7 = _mm_add_epi32(vmantw7, vexpw7);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
const __m128i vnonsignh3 = _mm_packs_epi32(vnonsignw6, vnonsignw7);
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vabsh2 = _mm_blendv_epi8(vnonsignh2, vnanh, vnanmaskh2);
const __m128i vabsh3 = _mm_blendv_epi8(vnonsignh3, vnanh, vnanmaskh3);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
const __m128i vh2 = _mm_or_si128(vabsh2, vsignh2);
const __m128i vh3 = _mm_or_si128(vabsh3, vsignh3);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
o += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
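  // Handle a 1-7 element remainder with two 4-wide loads (the second aliases the
  // first when fewer than 4 elements are left), then store 4/2/1 halves according
  // to the low bits of batch.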
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 14695 | 45.802548 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
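    // Store the converted halves piecewise: 4 via a 64-bit store, then 2 via a
    // 32-bit store, then 1, consuming vh from its low lanes upward.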
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 6397 | 40.816993 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx512skx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx512skx_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
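  // AVX512 provides hardware f32->f16 conversion: each _mm512_cvtps_ph below is a
  // single VCVTPS2PH covering 16 floats, rounding to nearest-even.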
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vf = _mm512_loadu_ps(input);
input += 16;
_mm256_storeu_si256((__m256i*) o, _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vf = _mm512_maskz_loadu_ps(vmask, input);
const __m256i vh = _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT);
_mm256_mask_storeu_epi16(o, vmask, vh);
}
}
| 1529 | 29 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-avx512skx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__avx512skx_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
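  // Same hardware conversion as the x16 kernel above, with a 2x-unrolled 32-element
  // main loop followed by a 16-element mop-up loop.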
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vf0 = _mm512_loadu_ps(input);
const __m512 vf1 = _mm512_loadu_ps(input + 16);
input += 32;
_mm256_storeu_si256((__m256i*) o, _mm512_cvtps_ph(vf0, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
_mm256_storeu_si256((__m256i*) (o + 16), _mm512_cvtps_ph(vf1, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
o += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vf = _mm512_loadu_ps(input);
input += 16;
_mm256_storeu_si256((__m256i*) o, _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vf = _mm512_maskz_loadu_ps(vmask, input);
const __m256i vh = _mm512_cvtps_ph(vf, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT);
_mm256_mask_storeu_epi16(o, vmask, vh);
}
}
| 1955 | 31.6 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__f16c_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
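  // F16C provides hardware f32->f16 conversion on 256-bit vectors: each
  // _mm256_cvtps_ph below converts 8 floats with round-to-nearest-even.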
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vf0 = _mm256_loadu_ps(input);
const __m256 vf1 = _mm256_loadu_ps(input + 8);
input += 16;
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vf = _mm256_loadu_ps(input);
input += 8;
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
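    // Build a per-lane load mask from a sliding window over mask_table so that only
    // the remaining lanes are enabled, then load just the valid elements.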
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->f16c.mask_table[7] - batch));
const __m256 vf = _mm256_maskload_ps(input, vmask);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, _mm_cvtps_ph(vf_lo, _MM_FROUND_TO_NEAREST_INT));
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
__m128i vh = _mm_cvtps_ph(vf_lo, _MM_FROUND_TO_NEAREST_INT);
if (batch & (2 * sizeof(float))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*((uint16_t*) o) = _mm_extract_epi16(vh, 0);
}
}
}
| 2,164 | 29.492958 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__f16c_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vf = _mm256_loadu_ps(input);
input += 8;
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->f16c.mask_table[7] - batch));
const __m256 vf = _mm256_maskload_ps(input, vmask);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, _mm_cvtps_ph(vf_lo, _MM_FROUND_TO_NEAREST_INT));
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
__m128i vh = _mm_cvtps_ph(vf_lo, _MM_FROUND_TO_NEAREST_INT);
if (batch & (2 * sizeof(float))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*((uint16_t*) o) = _mm_extract_epi16(vh, 0);
}
}
}
| 1785 | 27.806452 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neon_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32x4_t vexp_bias = vld1q_dup_u32(¶ms->neon.exp_bias);
const float32x4_t vscale_to_inf = vld1q_dup_f32(¶ms->neon.scale_to_inf);
const uint32x4_t vexpw_max = vld1q_dup_u32(¶ms->neon.expw_max);
const float32x4_t vscale_to_zero = vld1q_dup_f32(¶ms->neon.scale_to_zero);
const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));
uint16_t* o = (uint16_t*) output;
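  // NEON flavor of the same bit-manipulation conversion: vshrn narrows the shifted
  // 32-bit words directly to 16 bits, the sign bit comes from the top half of the
  // original word, and vbsl substitutes the canonical NaN (0x7E00) where needed.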
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0 = vld1q_f32(input); input += 4;
const float32x4_t vx1 = vld1q_f32(input); input += 4;
const float32x4_t vx2 = vld1q_f32(input); input += 4;
const float32x4_t vx3 = vld1q_f32(input); input += 4;
const float32x4_t vabsx0 = vabsq_f32(vx0);
const float32x4_t vabsx1 = vabsq_f32(vx1);
const float32x4_t vabsx2 = vabsq_f32(vx2);
const float32x4_t vabsx3 = vabsq_f32(vx3);
uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
uint32x4_t vbias2 = vaddq_u32(vreinterpretq_u32_f32(vabsx2), vexp_bias);
uint32x4_t vbias3 = vaddq_u32(vreinterpretq_u32_f32(vabsx3), vexp_bias);
float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
float32x4_t vf2 = vmulq_f32(vabsx2, vscale_to_inf);
float32x4_t vf3 = vmulq_f32(vabsx3, vscale_to_inf);
const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
const uint32x4_t vnanmaskw2 = vcgtq_u32(vreinterpretq_u32_f32(vabsx2), vexpw_max);
const uint32x4_t vnanmaskw3 = vcgtq_u32(vreinterpretq_u32_f32(vabsx3), vexpw_max);
vbias0 = vandq_u32(vbias0, vexpw_max);
vbias1 = vandq_u32(vbias1, vexpw_max);
vbias2 = vandq_u32(vbias2, vexpw_max);
vbias3 = vandq_u32(vbias3, vexpw_max);
vf0 = vmulq_f32(vf0, vscale_to_zero);
vf1 = vmulq_f32(vf1, vscale_to_zero);
vf2 = vmulq_f32(vf2, vscale_to_zero);
vf3 = vmulq_f32(vf3, vscale_to_zero);
const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
const uint16x8_t vnanmaskh1 = vcombine_u16(vmovn_u32(vnanmaskw2), vmovn_u32(vnanmaskw3));
vbias0 = vmaxq_u32(vbias0, vbias_min);
vbias1 = vmaxq_u32(vbias1, vbias_min);
vbias2 = vmaxq_u32(vbias2, vbias_min);
vbias3 = vmaxq_u32(vbias3, vbias_min);
vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
vf2 = vaddq_f32(vf2, vreinterpretq_f32_u32(vbias2));
vf3 = vaddq_f32(vf3, vreinterpretq_f32_u32(vbias3));
uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
uint16x8_t vexph1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf2), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf3), 13));
uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
uint16x8_t vmanth1 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf2)), vmovn_u32(vreinterpretq_u32_f32(vf3)));
uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
uint16x8_t vsignh1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx2), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx3), 16));
vexph0 = vandq_u16(vexph0, vexph_mask);
vexph1 = vandq_u16(vexph1, vexph_mask);
vmanth0 = vandq_u16(vmanth0, vmanth_mask);
vmanth1 = vandq_u16(vmanth1, vmanth_mask);
vsignh0 = vandq_u16(vsignh0, vsignh_mask);
vsignh1 = vandq_u16(vsignh1, vsignh_mask);
uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
uint16x8_t vh1 = vaddq_u16(vmanth1, vexph1);
vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
vh1 = vbslq_u16(vnanmaskh1, vnanh, vh1);
vh0 = vorrq_u16(vh0, vsignh0);
vh1 = vorrq_u16(vh1, vsignh1);
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
vst1_u16(o, vh); o += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
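    // Store the 1-3 remaining halves lane by lane: a 32-bit lane store covers a
    // pair, then vext rotates the next half into lane 0 for a final 16-bit store.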
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 7574 | 38.868421 | 128 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neon_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32x4_t vexp_bias = vld1q_dup_u32(¶ms->neon.exp_bias);
const float32x4_t vscale_to_inf = vld1q_dup_f32(¶ms->neon.scale_to_inf);
const uint32x4_t vexpw_max = vld1q_dup_u32(¶ms->neon.expw_max);
const float32x4_t vscale_to_zero = vld1q_dup_f32(¶ms->neon.scale_to_zero);
const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));
uint16_t* o = (uint16_t*) output;
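  // Same conversion as the neon x16 kernel above, unrolled to six 4-float
  // registers (24 elements) per main-loop iteration.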
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0 = vld1q_f32(input); input += 4;
const float32x4_t vx1 = vld1q_f32(input); input += 4;
const float32x4_t vx2 = vld1q_f32(input); input += 4;
const float32x4_t vx3 = vld1q_f32(input); input += 4;
const float32x4_t vx4 = vld1q_f32(input); input += 4;
const float32x4_t vx5 = vld1q_f32(input); input += 4;
const float32x4_t vabsx0 = vabsq_f32(vx0);
const float32x4_t vabsx1 = vabsq_f32(vx1);
const float32x4_t vabsx2 = vabsq_f32(vx2);
const float32x4_t vabsx3 = vabsq_f32(vx3);
const float32x4_t vabsx4 = vabsq_f32(vx4);
const float32x4_t vabsx5 = vabsq_f32(vx5);
uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
uint32x4_t vbias2 = vaddq_u32(vreinterpretq_u32_f32(vabsx2), vexp_bias);
uint32x4_t vbias3 = vaddq_u32(vreinterpretq_u32_f32(vabsx3), vexp_bias);
uint32x4_t vbias4 = vaddq_u32(vreinterpretq_u32_f32(vabsx4), vexp_bias);
uint32x4_t vbias5 = vaddq_u32(vreinterpretq_u32_f32(vabsx5), vexp_bias);
float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
float32x4_t vf2 = vmulq_f32(vabsx2, vscale_to_inf);
float32x4_t vf3 = vmulq_f32(vabsx3, vscale_to_inf);
float32x4_t vf4 = vmulq_f32(vabsx4, vscale_to_inf);
float32x4_t vf5 = vmulq_f32(vabsx5, vscale_to_inf);
const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
const uint32x4_t vnanmaskw2 = vcgtq_u32(vreinterpretq_u32_f32(vabsx2), vexpw_max);
const uint32x4_t vnanmaskw3 = vcgtq_u32(vreinterpretq_u32_f32(vabsx3), vexpw_max);
const uint32x4_t vnanmaskw4 = vcgtq_u32(vreinterpretq_u32_f32(vabsx4), vexpw_max);
const uint32x4_t vnanmaskw5 = vcgtq_u32(vreinterpretq_u32_f32(vabsx5), vexpw_max);
vbias0 = vandq_u32(vbias0, vexpw_max);
vbias1 = vandq_u32(vbias1, vexpw_max);
vbias2 = vandq_u32(vbias2, vexpw_max);
vbias3 = vandq_u32(vbias3, vexpw_max);
vbias4 = vandq_u32(vbias4, vexpw_max);
vbias5 = vandq_u32(vbias5, vexpw_max);
vf0 = vmulq_f32(vf0, vscale_to_zero);
vf1 = vmulq_f32(vf1, vscale_to_zero);
vf2 = vmulq_f32(vf2, vscale_to_zero);
vf3 = vmulq_f32(vf3, vscale_to_zero);
vf4 = vmulq_f32(vf4, vscale_to_zero);
vf5 = vmulq_f32(vf5, vscale_to_zero);
const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
const uint16x8_t vnanmaskh1 = vcombine_u16(vmovn_u32(vnanmaskw2), vmovn_u32(vnanmaskw3));
const uint16x8_t vnanmaskh2 = vcombine_u16(vmovn_u32(vnanmaskw4), vmovn_u32(vnanmaskw5));
vbias0 = vmaxq_u32(vbias0, vbias_min);
vbias1 = vmaxq_u32(vbias1, vbias_min);
vbias2 = vmaxq_u32(vbias2, vbias_min);
vbias3 = vmaxq_u32(vbias3, vbias_min);
vbias4 = vmaxq_u32(vbias4, vbias_min);
vbias5 = vmaxq_u32(vbias5, vbias_min);
vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
vf2 = vaddq_f32(vf2, vreinterpretq_f32_u32(vbias2));
vf3 = vaddq_f32(vf3, vreinterpretq_f32_u32(vbias3));
vf4 = vaddq_f32(vf4, vreinterpretq_f32_u32(vbias4));
vf5 = vaddq_f32(vf5, vreinterpretq_f32_u32(vbias5));
uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
uint16x8_t vexph1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf2), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf3), 13));
uint16x8_t vexph2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf4), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf5), 13));
uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
uint16x8_t vmanth1 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf2)), vmovn_u32(vreinterpretq_u32_f32(vf3)));
uint16x8_t vmanth2 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf4)), vmovn_u32(vreinterpretq_u32_f32(vf5)));
uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
uint16x8_t vsignh1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx2), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx3), 16));
uint16x8_t vsignh2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx4), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx5), 16));
vexph0 = vandq_u16(vexph0, vexph_mask);
vexph1 = vandq_u16(vexph1, vexph_mask);
vexph2 = vandq_u16(vexph2, vexph_mask);
vmanth0 = vandq_u16(vmanth0, vmanth_mask);
vmanth1 = vandq_u16(vmanth1, vmanth_mask);
vmanth2 = vandq_u16(vmanth2, vmanth_mask);
vsignh0 = vandq_u16(vsignh0, vsignh_mask);
vsignh1 = vandq_u16(vsignh1, vsignh_mask);
vsignh2 = vandq_u16(vsignh2, vsignh_mask);
uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
uint16x8_t vh1 = vaddq_u16(vmanth1, vexph1);
uint16x8_t vh2 = vaddq_u16(vmanth2, vexph2);
vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
vh1 = vbslq_u16(vnanmaskh1, vnanh, vh1);
vh2 = vbslq_u16(vnanmaskh2, vnanh, vh2);
vh0 = vorrq_u16(vh0, vsignh0);
vh1 = vorrq_u16(vh1, vsignh1);
vh2 = vorrq_u16(vh2, vsignh2);
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
vst1q_u16(o, vh2); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
vst1_u16(o, vh); o += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 9360 | 41.744292 | 128 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neon_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32x4_t vexp_bias = vld1q_dup_u32(¶ms->neon.exp_bias);
const float32x4_t vscale_to_inf = vld1q_dup_f32(¶ms->neon.scale_to_inf);
const uint32x4_t vexpw_max = vld1q_dup_u32(¶ms->neon.expw_max);
const float32x4_t vscale_to_zero = vld1q_dup_f32(¶ms->neon.scale_to_zero);
const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const float32x4_t vx0 = vld1q_f32(input); input += 4;
const float32x4_t vx1 = vld1q_f32(input); input += 4;
const float32x4_t vx2 = vld1q_f32(input); input += 4;
const float32x4_t vx3 = vld1q_f32(input); input += 4;
const float32x4_t vx4 = vld1q_f32(input); input += 4;
const float32x4_t vx5 = vld1q_f32(input); input += 4;
const float32x4_t vx6 = vld1q_f32(input); input += 4;
const float32x4_t vx7 = vld1q_f32(input); input += 4;
const float32x4_t vabsx0 = vabsq_f32(vx0);
const float32x4_t vabsx1 = vabsq_f32(vx1);
const float32x4_t vabsx2 = vabsq_f32(vx2);
const float32x4_t vabsx3 = vabsq_f32(vx3);
const float32x4_t vabsx4 = vabsq_f32(vx4);
const float32x4_t vabsx5 = vabsq_f32(vx5);
const float32x4_t vabsx6 = vabsq_f32(vx6);
const float32x4_t vabsx7 = vabsq_f32(vx7);
uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
uint32x4_t vbias2 = vaddq_u32(vreinterpretq_u32_f32(vabsx2), vexp_bias);
uint32x4_t vbias3 = vaddq_u32(vreinterpretq_u32_f32(vabsx3), vexp_bias);
uint32x4_t vbias4 = vaddq_u32(vreinterpretq_u32_f32(vabsx4), vexp_bias);
uint32x4_t vbias5 = vaddq_u32(vreinterpretq_u32_f32(vabsx5), vexp_bias);
uint32x4_t vbias6 = vaddq_u32(vreinterpretq_u32_f32(vabsx6), vexp_bias);
uint32x4_t vbias7 = vaddq_u32(vreinterpretq_u32_f32(vabsx7), vexp_bias);
float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
float32x4_t vf2 = vmulq_f32(vabsx2, vscale_to_inf);
float32x4_t vf3 = vmulq_f32(vabsx3, vscale_to_inf);
float32x4_t vf4 = vmulq_f32(vabsx4, vscale_to_inf);
float32x4_t vf5 = vmulq_f32(vabsx5, vscale_to_inf);
float32x4_t vf6 = vmulq_f32(vabsx6, vscale_to_inf);
float32x4_t vf7 = vmulq_f32(vabsx7, vscale_to_inf);
const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
const uint32x4_t vnanmaskw2 = vcgtq_u32(vreinterpretq_u32_f32(vabsx2), vexpw_max);
const uint32x4_t vnanmaskw3 = vcgtq_u32(vreinterpretq_u32_f32(vabsx3), vexpw_max);
const uint32x4_t vnanmaskw4 = vcgtq_u32(vreinterpretq_u32_f32(vabsx4), vexpw_max);
const uint32x4_t vnanmaskw5 = vcgtq_u32(vreinterpretq_u32_f32(vabsx5), vexpw_max);
const uint32x4_t vnanmaskw6 = vcgtq_u32(vreinterpretq_u32_f32(vabsx6), vexpw_max);
const uint32x4_t vnanmaskw7 = vcgtq_u32(vreinterpretq_u32_f32(vabsx7), vexpw_max);
vbias0 = vandq_u32(vbias0, vexpw_max);
vbias1 = vandq_u32(vbias1, vexpw_max);
vbias2 = vandq_u32(vbias2, vexpw_max);
vbias3 = vandq_u32(vbias3, vexpw_max);
vbias4 = vandq_u32(vbias4, vexpw_max);
vbias5 = vandq_u32(vbias5, vexpw_max);
vbias6 = vandq_u32(vbias6, vexpw_max);
vbias7 = vandq_u32(vbias7, vexpw_max);
vf0 = vmulq_f32(vf0, vscale_to_zero);
vf1 = vmulq_f32(vf1, vscale_to_zero);
vf2 = vmulq_f32(vf2, vscale_to_zero);
vf3 = vmulq_f32(vf3, vscale_to_zero);
vf4 = vmulq_f32(vf4, vscale_to_zero);
vf5 = vmulq_f32(vf5, vscale_to_zero);
vf6 = vmulq_f32(vf6, vscale_to_zero);
vf7 = vmulq_f32(vf7, vscale_to_zero);
const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
const uint16x8_t vnanmaskh1 = vcombine_u16(vmovn_u32(vnanmaskw2), vmovn_u32(vnanmaskw3));
const uint16x8_t vnanmaskh2 = vcombine_u16(vmovn_u32(vnanmaskw4), vmovn_u32(vnanmaskw5));
const uint16x8_t vnanmaskh3 = vcombine_u16(vmovn_u32(vnanmaskw6), vmovn_u32(vnanmaskw7));
vbias0 = vmaxq_u32(vbias0, vbias_min);
vbias1 = vmaxq_u32(vbias1, vbias_min);
vbias2 = vmaxq_u32(vbias2, vbias_min);
vbias3 = vmaxq_u32(vbias3, vbias_min);
vbias4 = vmaxq_u32(vbias4, vbias_min);
vbias5 = vmaxq_u32(vbias5, vbias_min);
vbias6 = vmaxq_u32(vbias6, vbias_min);
vbias7 = vmaxq_u32(vbias7, vbias_min);
vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
vf2 = vaddq_f32(vf2, vreinterpretq_f32_u32(vbias2));
vf3 = vaddq_f32(vf3, vreinterpretq_f32_u32(vbias3));
vf4 = vaddq_f32(vf4, vreinterpretq_f32_u32(vbias4));
vf5 = vaddq_f32(vf5, vreinterpretq_f32_u32(vbias5));
vf6 = vaddq_f32(vf6, vreinterpretq_f32_u32(vbias6));
vf7 = vaddq_f32(vf7, vreinterpretq_f32_u32(vbias7));
uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
uint16x8_t vexph1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf2), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf3), 13));
uint16x8_t vexph2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf4), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf5), 13));
uint16x8_t vexph3 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf6), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf7), 13));
uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
uint16x8_t vmanth1 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf2)), vmovn_u32(vreinterpretq_u32_f32(vf3)));
uint16x8_t vmanth2 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf4)), vmovn_u32(vreinterpretq_u32_f32(vf5)));
uint16x8_t vmanth3 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf6)), vmovn_u32(vreinterpretq_u32_f32(vf7)));
uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
uint16x8_t vsignh1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx2), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx3), 16));
uint16x8_t vsignh2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx4), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx5), 16));
uint16x8_t vsignh3 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx6), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx7), 16));
vexph0 = vandq_u16(vexph0, vexph_mask);
vexph1 = vandq_u16(vexph1, vexph_mask);
vexph2 = vandq_u16(vexph2, vexph_mask);
vexph3 = vandq_u16(vexph3, vexph_mask);
vmanth0 = vandq_u16(vmanth0, vmanth_mask);
vmanth1 = vandq_u16(vmanth1, vmanth_mask);
vmanth2 = vandq_u16(vmanth2, vmanth_mask);
vmanth3 = vandq_u16(vmanth3, vmanth_mask);
vsignh0 = vandq_u16(vsignh0, vsignh_mask);
vsignh1 = vandq_u16(vsignh1, vsignh_mask);
vsignh2 = vandq_u16(vsignh2, vsignh_mask);
vsignh3 = vandq_u16(vsignh3, vsignh_mask);
uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
uint16x8_t vh1 = vaddq_u16(vmanth1, vexph1);
uint16x8_t vh2 = vaddq_u16(vmanth2, vexph2);
uint16x8_t vh3 = vaddq_u16(vmanth3, vexph3);
vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
vh1 = vbslq_u16(vnanmaskh1, vnanh, vh1);
vh2 = vbslq_u16(vnanmaskh2, vnanh, vh2);
vh3 = vbslq_u16(vnanmaskh3, vnanh, vh3);
vh0 = vorrq_u16(vh0, vsignh0);
vh1 = vorrq_u16(vh1, vsignh1);
vh2 = vorrq_u16(vh2, vsignh2);
vh3 = vorrq_u16(vh3, vsignh3);
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
vst1q_u16(o, vh2); o += 8;
vst1q_u16(o, vh3); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
vst1_u16(o, vh); o += 4;
}
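  // Convert the last 1-3 elements: load a full 4-lane vector and store only the valid low lanes.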
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 11,146 | 43.947581 | 128 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neon_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32x4_t vexp_bias = vld1q_dup_u32(¶ms->neon.exp_bias);
const float32x4_t vscale_to_inf = vld1q_dup_f32(¶ms->neon.scale_to_inf);
const uint32x4_t vexpw_max = vld1q_dup_u32(¶ms->neon.expw_max);
const float32x4_t vscale_to_zero = vld1q_dup_f32(¶ms->neon.scale_to_zero);
const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));
uint16_t* o = (uint16_t*) output;
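  // Main loop: convert 8 elements per iteration as two 4-lane halves packed into one 8-lane output vector.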
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0 = vld1q_f32(input); input += 4;
const float32x4_t vx1 = vld1q_f32(input); input += 4;
const float32x4_t vabsx0 = vabsq_f32(vx0);
const float32x4_t vabsx1 = vabsq_f32(vx1);
uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
vbias0 = vandq_u32(vbias0, vexpw_max);
vbias1 = vandq_u32(vbias1, vexpw_max);
vf0 = vmulq_f32(vf0, vscale_to_zero);
vf1 = vmulq_f32(vf1, vscale_to_zero);
const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
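    // Clamp the biased exponents from below so that zero and subnormal inputs still form a valid FP32 addend.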
vbias0 = vmaxq_u32(vbias0, vbias_min);
vbias1 = vmaxq_u32(vbias1, vbias_min);
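    // The FP32 addition of the reinterpreted bias rounds the FP16 mantissa to nearest-even.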
vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
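    // Extract the FP16 exponent, mantissa, and sign fields.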
uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
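    // Mask each field down to its FP16 bit positions.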
vexph0 = vandq_u16(vexph0, vexph_mask);
vmanth0 = vandq_u16(vmanth0, vmanth_mask);
vsignh0 = vandq_u16(vsignh0, vsignh_mask);
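    // Combine the fields, substituting a canonical FP16 NaN (0x7E00) where the input was NaN.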
uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
vh0 = vorrq_u16(vh0, vsignh0);
vst1q_u16(o, vh0); o += 8;
}
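  // Convert a remaining full group of 4 elements.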
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
vst1_u16(o, vh); o += 4;
}
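  // Convert the last 1-3 elements; XNN_OOB_READS permits the full 4-lane load, and only valid lanes are stored.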
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vabsx = vabsq_f32(vx);
uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);
float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);
vbias = vandq_u32(vbias, vexpw_max);
vf = vmulq_f32(vf, vscale_to_zero);
const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
vbias = vmaxq_u32(vbias, vbias_min);
vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));
uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);
vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));
uint16x4_t vh = vadd_u16(vmanth, vexph);
vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);
vh = vorr_u16(vh, vsignh);
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 5,785 | 34.937888 | 128 |
c
|