repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f16-vunary/gen/f16-vsqr-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqr_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vmulq_f16(vacc0, vacc0);
vacc1 = vmulq_f16(vacc1, vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vmulq_f16(vacc, vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vmulq_f16(vacc, vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,015 | 31 | 90 | c |
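The two f16-vsqr rows in this table are generated variants of the same microkernel: `batch` counts bytes, the main loops consume whole fp16 vectors, and XNN_OOB_READS marks that the tail may load a full vector past the end of the input. A scalar model of the x16 control flow follows; it is an editor's sketch, not XNNPACK code, with `vsqr_model` a made-up name and `float` standing in for the fp16 lanes.

// Editor's sketch: scalar model of the x16 kernel's loop structure above.
#include <assert.h>
#include <stddef.h>

static void vsqr_model(size_t batch, const float* i, float* o) {
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  // Unrolled main loop: two vectors (16 elements) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    for (size_t k = 0; k < 16; k++) { o[k] = i[k] * i[k]; }
    i += 16; o += 16;
  }
  // Single-vector loop: 8 elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    for (size_t k = 0; k < 8; k++) { o[k] = i[k] * i[k]; }
    i += 8; o += 8;
  }
  // Tail: the real kernel loads a FULL vector here (hence XNN_OOB_READS)
  // and stores only the valid lanes in 4/2/1-element steps.
  for (size_t k = 0; k < batch / sizeof(float); k++) { o[k] = i[k] * i[k]; }
}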
XNNPACK | XNNPACK-master/src/f16-vunary/gen/f16-vsqr-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqr_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vmulq_f16(vacc, vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vmulq_f16(vacc, vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,609 | 29.377358 | 90 | c |
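The x8 row above is the same kernel minus the two-vector unroll; its tail is worth spelling out: a full 8-lane vector is computed, then only the valid lanes are stored through a 4/2/1 cascade keyed off the low bits of the remaining byte count. The net effect, modeled standalone (editor's sketch; `store_tail` is a made-up name and `rem` is the remaining element count, 1..7):

// Editor's sketch: net effect of the 4/2/1 tail-store cascade above.
#include <stddef.h>

static void store_tail(const float* v /* 8 computed lanes */, float* o, size_t rem) {
  size_t pos = 0;
  if (rem & 4) { for (size_t k = 0; k < 4; k++) { o[pos + k] = v[pos + k]; } pos += 4; }
  if (rem & 2) { for (size_t k = 0; k < 2; k++) { o[pos + k] = v[pos + k]; } pos += 2; }
  if (rem & 1) { o[pos] = v[pos]; }
}

In the kernel itself the vget_high/vext shuffles rotate fresh lanes into position between the partial stores; the elements copied are the same first `rem` lanes as here.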
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-4x-neon-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_4x__neon_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 4);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements != 4) {
i3 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
float32x4_t vmax = vi0;
uint32x4_t vidx = vmovq_n_u32(0);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);
vst1q_f32(output, vmax); output += 4;
vst1q_u32(index, vidx); index += 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
float32x4_t vmax = vi0;
uint32x4_t vidx = vmovq_n_u32(0);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);
float32x2_t vmax_lo = vget_low_f32(vmax);
uint32x2_t vidx_lo = vget_low_u32(vidx);
if (c & 2) {
vst1_f32(output, vmax_lo); output += 2;
vst1_u32(index, vidx_lo); index += 2;
vmax_lo = vget_high_f32(vmax);
vidx_lo = vget_high_u32(vidx);
}
if (c & 1) {
vst1_lane_f32(output, vmax_lo, 0); output += 1;
vst1_lane_u32(index, vidx_lo, 0); index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,339 | 29.09009 | 72 | c |
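The NEON argmax kernels stay branch-free inside the channel loop: vcgtq_f32 produces an all-ones or all-zero 32-bit mask per lane, and vbslq_* bit-selects the new maximum and its pooling index wherever the mask is set. One update step, factored out as a compilable sketch (editor's helper name; a NEON-enabled target is assumed):

// Editor's sketch: one branch-free argmax update, as repeated in the kernels above.
#include <arm_neon.h>

static void argmax_step(float32x4_t vi, uint32_t step_index,
                        float32x4_t* vmax, uint32x4_t* vidx) {
  const uint32x4_t vm = vcgtq_f32(vi, *vmax);              // lanes where vi > max
  *vmax = vbslq_f32(vm, vi, *vmax);                        // take vi on those lanes
  *vidx = vbslq_u32(vm, vmovq_n_u32(step_index), *vidx);   // record its index
}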
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-4x-scalar-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/argmaxpool.h>
#include <xnnpack/math.h>
void xnn_f32_argmaxpool_ukernel_4x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 4);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements != 4) {
i3 = i0;
}
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
float vmax = vi0;
uint32_t vidx = 0;
if (vi1 > vmax) {
vmax = vi1;
vidx = 1;
}
if (vi2 > vmax) {
vmax = vi2;
vidx = 2;
}
if (vi3 > vmax) {
vmax = vi3;
vidx = 3;
}
*output++ = vmax;
*index++ = vidx;
} while (--c != 0);
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 1,824 | 22.101266 | 72 | c |
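The scalar variant makes the row-aliasing trick at the top of each kernel easy to verify: when pooling_elements is short, the unused row pointers are redirected to row 0, and because every comparison is a strict greater-than, a duplicated row can never displace the running maximum or overwrite index 0. A standalone demonstration (editor's demo, not library code):

// Editor's demo: an aliased row never wins the strict > comparison.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const float row0[3] = {1.0f, 5.0f, 3.0f};
  const float* rows[4] = {row0, row0, row0, row0};  // pooling_elements == 1
  for (int c = 0; c < 3; c++) {
    float vmax = rows[0][c];
    uint32_t vidx = 0;
    for (uint32_t p = 1; p < 4; p++) {
      if (rows[p][c] > vmax) { vmax = rows[p][c]; vidx = p; }  // never taken here
    }
    printf("c=%d max=%g idx=%u\n", c, vmax, vidx);  // idx is always 0
  }
  return 0;
}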
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-4x-sse2-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_4x__sse2_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 4);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements != 4) {
i3 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
__m128 vmax = vi0;
__m128i vidx = _mm_setzero_si128();
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));
_mm_storeu_ps(output, vmax);
output += 4;
_mm_storeu_si128((__m128i*) index, vidx);
index += 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
__m128 vmax = vi0;
__m128i vidx = _mm_setzero_si128();
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));
if (c & 2) {
_mm_storel_pi((__m64*) output, vmax);
_mm_storel_epi64((__m128i*) index, vidx);
vmax = _mm_movehl_ps(vmax, vmax);
vidx = _mm_unpackhi_epi64(vidx, vidx);
output += 2;
index += 2;
}
if (c & 1) {
_mm_store_ss(output, vmax);
*index = (uint32_t) _mm_cvtsi128_si32(vidx);
output += 1;
index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,703 | 30.12605 | 94 | c |
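Baseline SSE2 has no variable blend instruction, so the kernel above synthesizes select(mask, a, b) as (b andnot mask) | (a and mask); SSE4.1's _mm_blendv_ps would do it in one op, but that is outside this kernel's target. A minimal compilable sketch of the idiom (sse2_select is the editor's helper name):

// Editor's sketch: the and/andnot/or select idiom used by the SSE2 kernels above.
#include <emmintrin.h>
#include <stdio.h>

static __m128i sse2_select(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_andnot_si128(mask, b), _mm_and_si128(mask, a));
}

int main(void) {
  const __m128 x = _mm_setr_ps(1.0f, 4.0f, 2.0f, 8.0f);
  const __m128 y = _mm_setr_ps(3.0f, 1.0f, 5.0f, 7.0f);
  const __m128i m = _mm_castps_si128(_mm_cmpgt_ps(x, y));   // all-ones where x > y
  const __m128i idx = sse2_select(m, _mm_set1_epi32(1), _mm_setzero_si128());
  int out[4];
  _mm_storeu_si128((__m128i*) out, idx);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // prints: 0 1 0 1
  return 0;
}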
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-4x-wasmsimd-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_4x__wasmsimd_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index_ptr,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 4);
assert(channels != 0);
float* index = (float*) index_ptr;
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements != 4) {
i3 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
v128_t vmax = vi0;
v128_t vidx = wasm_i32x4_const_splat(0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(1), vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(2), vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(3), vidx, vm3);
wasm_v128_store(output, vmax);
output += 4;
wasm_v128_store(index, vidx);
index += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
v128_t vmax = vi0;
v128_t vidx = wasm_i32x4_const_splat(0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(1), vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(2), vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(3), vidx, vm3);
if (c & 2) {
wasm_v128_store64_lane(output, vmax, 0);
wasm_v128_store64_lane(index, vidx, 0);
vmax = wasm_v64x2_shuffle(vmax, vmax, 1, 1);
vidx = wasm_v64x2_shuffle(vidx, vidx, 1, 1);
output += 2;
index += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vmax, 0);
wasm_v128_store32_lane(index, vidx, 0);
output += 1;
index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,631 | 29.266667 | 72 | c |
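The WAsm variant gets a real bit-select, but note the argument order: wasm_v128_bitselect(a, b, mask) takes the mask LAST and returns bits of a where the mask is 1, whereas NEON's vbslq takes the mask first. A one-lane model of the operation (editor's sketch):

// Editor's sketch: per-bit semantics of wasm_v128_bitselect(a, b, mask).
#include <stdint.h>

static uint32_t bitselect_model(uint32_t a, uint32_t b, uint32_t mask) {
  return (a & mask) | (b & ~mask);  // a where mask bits are 1, b elsewhere
}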
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9p8x-neon-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9p8x__neon_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* accumulation_buffer,
uint32_t* index_buffer,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements > 9);
assert(channels != 0);
do {
{
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
float32x4_t vmax = vi0;
uint32x4_t vidx = vmovq_n_u32(0);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vmovq_n_u32(4), vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vmovq_n_u32(5), vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vmovq_n_u32(6), vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vmovq_n_u32(7), vidx);
const uint32x4_t vm8 = vcgtq_f32(vi8, vmax);
vmax = vbslq_f32(vm8, vi8, vmax);
vidx = vbslq_u32(vm8, vmovq_n_u32(8), vidx);
vst1q_f32(ab, vmax); ab += 4;
vst1q_u32(ib, vidx); ib += 4;
}
}
const uint32x4_t v1 = vmovq_n_u32(1);
const uint32x4_t v8 = vmovq_n_u32(8);
uint32x4_t vidx0 = vaddq_u32(v1, v8);
size_t k = pooling_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
float32x4_t vmax = vld1q_f32(ab);
uint32x4_t vidx = vld1q_u32(ib);
const uint32x4_t vm0 = vcgtq_f32(vi0, vmax);
vmax = vbslq_f32(vm0, vi0, vmax);
vidx = vbslq_u32(vm0, vidx0, vidx);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
const uint32x4_t vidx1 = vaddq_u32(vidx0, v1);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vidx1, vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
const uint32x4_t vidx2 = vaddq_u32(vidx1, v1);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vidx2, vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
const uint32x4_t vidx3 = vaddq_u32(vidx2, v1);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vidx3, vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
const uint32x4_t vidx4 = vaddq_u32(vidx3, v1);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vidx4, vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
const uint32x4_t vidx5 = vaddq_u32(vidx4, v1);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vidx5, vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
const uint32x4_t vidx6 = vaddq_u32(vidx5, v1);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vidx6, vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
const uint32x4_t vidx7 = vaddq_u32(vidx6, v1);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vidx7, vidx);
vst1q_f32(ab, vmax); ab += 4;
vst1q_u32(ib, vidx); ib += 4;
}
vidx0 = vaddq_u32(vidx0, v8);
}
float* o = output;
uint32_t* i = index;
{
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k != 8) {
i7 = i0;
}
size_t c = channels;
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (; c >= 4; c -= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
float32x4_t vmax = vld1q_f32(ab); ab += 4;
uint32x4_t vidx = vld1q_u32(ib); ib += 4;
const uint32x4_t vm0 = vcgtq_f32(vi0, vmax);
vmax = vbslq_f32(vm0, vi0, vmax);
vidx = vbslq_u32(vm0, vidx0, vidx);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
const uint32x4_t vidx1 = vaddq_u32(vidx0, v1);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vidx1, vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
const uint32x4_t vidx2 = vaddq_u32(vidx1, v1);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vidx2, vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
const uint32x4_t vidx3 = vaddq_u32(vidx2, v1);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vidx3, vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
const uint32x4_t vidx4 = vaddq_u32(vidx3, v1);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vidx4, vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
const uint32x4_t vidx5 = vaddq_u32(vidx4, v1);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vidx5, vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
const uint32x4_t vidx6 = vaddq_u32(vidx5, v1);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vidx6, vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
const uint32x4_t vidx7 = vaddq_u32(vidx6, v1);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vidx7, vidx);
vst1q_f32(o, vmax); o += 4;
vst1q_u32(i, vidx); i += 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
float32x4_t vmax = vld1q_f32(ab);
uint32x4_t vidx = vld1q_u32(ib);
const uint32x4_t vm0 = vcgtq_f32(vi0, vmax);
vmax = vbslq_f32(vm0, vi0, vmax);
vidx = vbslq_u32(vm0, vidx0, vidx);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
const uint32x4_t vidx1 = vaddq_u32(vidx0, v1);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vidx1, vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
const uint32x4_t vidx2 = vaddq_u32(vidx1, v1);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vidx2, vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
const uint32x4_t vidx3 = vaddq_u32(vidx2, v1);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vidx3, vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
const uint32x4_t vidx4 = vaddq_u32(vidx3, v1);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vidx4, vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
const uint32x4_t vidx5 = vaddq_u32(vidx4, v1);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vidx5, vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
const uint32x4_t vidx6 = vaddq_u32(vidx5, v1);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vidx6, vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
const uint32x4_t vidx7 = vaddq_u32(vidx6, v1);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vidx7, vidx);
float32x2_t vmax_lo = vget_low_f32(vmax);
uint32x2_t vidx_lo = vget_low_u32(vidx);
if (c & 2) {
vst1_f32(o, vmax_lo); o += 2;
vst1_u32(i, vidx_lo); i += 2;
vmax_lo = vget_high_f32(vmax);
vidx_lo = vget_high_u32(vidx);
}
if (c & 1) {
vst1_lane_f32(o, vmax_lo, 0); o += 1;
vst1_lane_u32(i, vidx_lo, 0); i += 1;
}
}
}
output = (float*) ((uintptr_t) o + output_increment);
index = (uint32_t*) i;
} while (--output_pixels != 0);
}
| 12,764 | 34.458333 | 72 | c |
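The 9p8x kernels split a long pooling window into passes: the first pass reduces 9 taps into per-channel max/index scratch buffers, each middle pass folds in 8 more taps with a running base index vidx0 = 9, 17, 25, ..., and the final pass of 1..8 taps (with unused rows aliased to row 0) writes the result. A per-channel scalar model of that index arithmetic (editor's sketch; argmax_9p8x_model is a made-up name, and it loops over the final k taps where the kernel aliases rows instead, with the same result):

// Editor's sketch: pass structure and index bookkeeping of the 9p8x kernels.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void argmax_9p8x_model(size_t k, const float* taps,  // k taps, one channel
                              float* out_max, uint32_t* out_idx) {
  assert(k > 9);
  float vmax = taps[0];
  uint32_t vidx = 0;
  for (uint32_t p = 1; p < 9; p++) {               // first pass: 9 taps
    if (taps[p] > vmax) { vmax = taps[p]; vidx = p; }
  }
  uint32_t vidx0 = 9;                              // base index of the next pass
  for (k -= 9; k > 8; k -= 8) {                    // middle passes: 8 taps each
    for (uint32_t p = 0; p < 8; p++) {
      if (taps[vidx0 + p] > vmax) { vmax = taps[vidx0 + p]; vidx = vidx0 + p; }
    }
    vidx0 += 8;
  }
  for (uint32_t p = 0; p < (uint32_t) k; p++) {    // final pass: 1..8 taps
    if (taps[vidx0 + p] > vmax) { vmax = taps[vidx0 + p]; vidx = vidx0 + p; }
  }
  *out_max = vmax;
  *out_idx = vidx;
}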
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9p8x-scalar-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/argmaxpool.h>
#include <xnnpack/math.h>
void xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* accumulation_buffer,
uint32_t* index_buffer,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements > 9);
assert(channels != 0);
do {
{
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
float vmax = vi0;
uint32_t vidx = 0;
if (vi1 > vmax) {
vmax = vi1;
vidx = 1;
}
if (vi2 > vmax) {
vmax = vi2;
vidx = 2;
}
if (vi3 > vmax) {
vmax = vi3;
vidx = 3;
}
if (vi4 > vmax) {
vmax = vi4;
vidx = 4;
}
if (vi5 > vmax) {
vmax = vi5;
vidx = 5;
}
if (vi6 > vmax) {
vmax = vi6;
vidx = 6;
}
if (vi7 > vmax) {
vmax = vi7;
vidx = 7;
}
if (vi8 > vmax) {
vmax = vi8;
vidx = 8;
}
*ab++ = vmax;
*ib++ = vidx;
} while (--c != 0);
}
uint32_t vidx0 = 9;
size_t k = pooling_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
float vmax = *ab;
uint32_t vidx = *ib;
if (vi0 > vmax) {
vmax = vi0;
vidx = vidx0;
}
if (vi1 > vmax) {
vmax = vi1;
vidx = vidx0 + 1;
}
if (vi2 > vmax) {
vmax = vi2;
vidx = vidx0 + 2;
}
if (vi3 > vmax) {
vmax = vi3;
vidx = vidx0 + 3;
}
if (vi4 > vmax) {
vmax = vi4;
vidx = vidx0 + 4;
}
if (vi5 > vmax) {
vmax = vi5;
vidx = vidx0 + 5;
}
if (vi6 > vmax) {
vmax = vi6;
vidx = vidx0 + 6;
}
if (vi7 > vmax) {
vmax = vi7;
vidx = vidx0 + 7;
}
*ab++ = vmax;
*ib++ = vidx;
} while (--c != 0);
vidx0 += 8;
}
float* o = output;
uint32_t* i = index;
{
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k != 8) {
i7 = i0;
}
size_t c = channels;
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
float vmax = *ab++;
uint32_t vidx = *ib++;
if (vi0 > vmax) {
vmax = vi0;
vidx = vidx0;
}
if (vi1 > vmax) {
vmax = vi1;
vidx = vidx0 + 1;
}
if (vi2 > vmax) {
vmax = vi2;
vidx = vidx0 + 2;
}
if (vi3 > vmax) {
vmax = vi3;
vidx = vidx0 + 3;
}
if (vi4 > vmax) {
vmax = vi4;
vidx = vidx0 + 4;
}
if (vi5 > vmax) {
vmax = vi5;
vidx = vidx0 + 5;
}
if (vi6 > vmax) {
vmax = vi6;
vidx = vidx0 + 6;
}
if (vi7 > vmax) {
vmax = vi7;
vidx = vidx0 + 7;
}
*o++ = vmax;
*i++ = vidx;
} while (--c != 0);
}
output = (float*) ((uintptr_t) o + output_increment);
index = (uint32_t*) i;
} while (--output_pixels != 0);
}
| 7,287 | 23.052805 | 72 | c |
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9p8x-sse2-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* accumulation_buffer,
uint32_t* index_buffer,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements > 9);
assert(channels != 0);
do {
{
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vmax = vi0;
__m128i vidx = _mm_setzero_si128();
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));
const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
vmax = _mm_max_ps(vi8, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));
_mm_store_ps(ab, vmax);
ab += 4;
_mm_store_si128((__m128i*) ib, vidx);
ib += 4;
}
}
const __m128i v1 = _mm_set1_epi32(1);
const __m128i v8 = _mm_set1_epi32(8);
__m128i vidx0 = _mm_add_epi32(v1, v8);
size_t k = pooling_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vmax = _mm_load_ps(ab);
__m128i vidx = _mm_load_si128((const __m128i*) ib);
const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
vmax = _mm_max_ps(vi0, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));
_mm_store_ps(ab, vmax);
ab += 4;
_mm_store_si128((__m128i*) ib, vidx);
ib += 4;
}
vidx0 = _mm_add_epi32(vidx0, v8);
}
float* o = output;
uint32_t* i = index;
{
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k != 8) {
i7 = i0;
}
size_t c = channels;
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (; c >= 4; c -= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
__m128 vmax = _mm_load_ps(ab);
ab += 4;
__m128i vidx = _mm_load_si128((const __m128i*) ib);
ib += 4;
const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
vmax = _mm_max_ps(vi0, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));
_mm_storeu_ps(o, vmax);
o += 4;
_mm_storeu_si128((__m128i*) i, vidx);
i += 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
__m128 vmax = _mm_load_ps(ab);
__m128i vidx = _mm_load_si128((const __m128i*) ib);
const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
vmax = _mm_max_ps(vi0, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));
if (c & 2) {
_mm_storel_pi((__m64*) o, vmax);
_mm_storel_epi64((__m128i*) i, vidx);
vmax = _mm_movehl_ps(vmax, vmax);
vidx = _mm_unpackhi_epi64(vidx, vidx);
o += 2;
i += 2;
}
if (c & 1) {
_mm_store_ss(o, vmax);
*i = (uint32_t) _mm_cvtsi128_si32(vidx);
o += 1;
i += 1;
}
}
}
output = (float*) ((uintptr_t) o + output_increment);
index = (uint32_t*) i;
} while (--output_pixels != 0);
}
| 14,863 | 36.63038 | 96 | c |
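A practical note on the scratch buffers the 9p8x kernels take: the channel loops stride four entries at a time and store whole vectors, and the SSE2 variant uses aligned _mm_load_ps/_mm_store_ps on them, so a conservative caller pads both buffers to a multiple of 4 entries and aligns them to 16 bytes. This sizing is the editor's reading of the loads and stores above, not XNNPACK's own allocation code:

// Editor's sizing sketch for the 9p8x accumulation/index scratch buffers.
#include <stdint.h>
#include <stdlib.h>

int main(void) {
  const size_t channels = 17;
  const size_t padded = (channels + 3) & ~(size_t) 3;  // round up to 4 -> 20
  // padded * 4 bytes is a multiple of 16, satisfying aligned_alloc's contract.
  float* accumulation_buffer = aligned_alloc(16, padded * sizeof(float));
  uint32_t* index_buffer = aligned_alloc(16, padded * sizeof(uint32_t));
  if (accumulation_buffer == NULL || index_buffer == NULL) { return 1; }
  free(accumulation_buffer);
  free(index_buffer);
  return 0;
}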
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9p8x-wasmsimd-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9p8x__wasmsimd_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* accumulation_buffer,
uint32_t* index_buffer,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements > 9);
assert(channels != 0);
do {
{
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
const float* i8 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
v128_t vmax = vi0;
v128_t vidx = wasm_i32x4_const_splat(0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(1), vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(2), vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(3), vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(4), vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(5), vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(6), vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(7), vidx, vm7);
const v128_t vm8 = wasm_f32x4_gt(vi8, vmax);
vmax = wasm_v128_bitselect(vi8, vmax, vm8);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(8), vidx, vm8);
wasm_v128_store(ab, vmax);
ab += 4;
wasm_v128_store(ib, vidx);
ib += 4;
}
}
const v128_t v1 = wasm_i32x4_const_splat(1);
const v128_t v8 = wasm_i32x4_const_splat(8);
v128_t vidx0 = wasm_i32x4_add(v1, v8);
size_t k = pooling_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
const float* i1 = *input++;
const float* i2 = *input++;
const float* i3 = *input++;
const float* i4 = *input++;
const float* i5 = *input++;
const float* i6 = *input++;
const float* i7 = *input++;
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
v128_t vmax = wasm_v128_load(ab);
v128_t vidx = wasm_v128_load(ib);
const v128_t vm0 = wasm_f32x4_gt(vi0, vmax);
vmax = wasm_v128_bitselect(vi0, vmax, vm0);
vidx = wasm_v128_bitselect(vidx0, vidx, vm0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
const v128_t vidx1 = wasm_i32x4_add(vidx0, v1);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(vidx1, vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
const v128_t vidx2 = wasm_i32x4_add(vidx1, v1);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(vidx2, vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
const v128_t vidx3 = wasm_i32x4_add(vidx2, v1);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(vidx3, vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
const v128_t vidx4 = wasm_i32x4_add(vidx3, v1);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(vidx4, vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
const v128_t vidx5 = wasm_i32x4_add(vidx4, v1);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(vidx5, vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
const v128_t vidx6 = wasm_i32x4_add(vidx5, v1);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(vidx6, vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
const v128_t vidx7 = wasm_i32x4_add(vidx6, v1);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(vidx7, vidx, vm7);
wasm_v128_store(ab, vmax);
ab += 4;
wasm_v128_store(ib, vidx);
ib += 4;
}
vidx0 = wasm_i32x4_add(vidx0, v8);
}
float* o = output;
float* i = (float*) index;
{
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k != 8) {
i7 = i0;
}
size_t c = channels;
float* ab = accumulation_buffer;
uint32_t* ib = index_buffer;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
v128_t vmax = wasm_v128_load(ab);
ab += 4;
v128_t vidx = wasm_v128_load(ib);
ib += 4;
const v128_t vm0 = wasm_f32x4_gt(vi0, vmax);
vmax = wasm_v128_bitselect(vi0, vmax, vm0);
vidx = wasm_v128_bitselect(vidx0, vidx, vm0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
const v128_t vidx1 = wasm_i32x4_add(vidx0, v1);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(vidx1, vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
const v128_t vidx2 = wasm_i32x4_add(vidx1, v1);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(vidx2, vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
const v128_t vidx3 = wasm_i32x4_add(vidx2, v1);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(vidx3, vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
const v128_t vidx4 = wasm_i32x4_add(vidx3, v1);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(vidx4, vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
const v128_t vidx5 = wasm_i32x4_add(vidx4, v1);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(vidx5, vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
const v128_t vidx6 = wasm_i32x4_add(vidx5, v1);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(vidx6, vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
const v128_t vidx7 = wasm_i32x4_add(vidx6, v1);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(vidx7, vidx, vm7);
wasm_v128_store(o, vmax);
o += 4;
wasm_v128_store(i, vidx);
i += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
v128_t vmax = wasm_v128_load(ab);
v128_t vidx = wasm_v128_load(ib);
const v128_t vm0 = wasm_f32x4_gt(vi0, vmax);
vmax = wasm_v128_bitselect(vi0, vmax, vm0);
vidx = wasm_v128_bitselect(vidx0, vidx, vm0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
const v128_t vidx1 = wasm_i32x4_add(vidx0, v1);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(vidx1, vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
const v128_t vidx2 = wasm_i32x4_add(vidx1, v1);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(vidx2, vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
const v128_t vidx3 = wasm_i32x4_add(vidx2, v1);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(vidx3, vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
const v128_t vidx4 = wasm_i32x4_add(vidx3, v1);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(vidx4, vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
const v128_t vidx5 = wasm_i32x4_add(vidx4, v1);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(vidx5, vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
const v128_t vidx6 = wasm_i32x4_add(vidx5, v1);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(vidx6, vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
const v128_t vidx7 = wasm_i32x4_add(vidx6, v1);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(vidx7, vidx, vm7);
if (c & 2) {
wasm_v128_store64_lane(o, vmax, 0);
wasm_v128_store64_lane(i, vidx, 0);
vmax = wasm_v64x2_shuffle(vmax, vmax, 1, 1);
vidx = wasm_v64x2_shuffle(vidx, vidx, 1, 1);
o += 2;
i += 2;
}
if (c & 1) {
wasm_v128_store32_lane(o, vmax, 0);
wasm_v128_store32_lane(i, vidx, 0);
o += 1;
i += 1;
}
}
}
output = (float*) ((uintptr_t) o + output_increment);
index = (uint32_t*) i;
} while (--output_pixels != 0);
}
| 13,861 | 34.093671 | 73 | c |
XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9x-neon-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9x__neon_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 9);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements < 4) {
i3 = i0;
}
if (pooling_elements <= 4) {
i4 = i0;
}
if (pooling_elements < 6) {
i5 = i0;
}
if (pooling_elements <= 6) {
i6 = i0;
}
if (pooling_elements < 8) {
i7 = i0;
}
if (pooling_elements <= 8) {
i8 = i0;
}
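    // Out-of-range pooling rows alias i0; because the comparison is strict (>),
    // a duplicated row can never replace the running maximum or its index.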
size_t c = channels;
for (; c >= 4; c -= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
float32x4_t vmax = vi0;
uint32x4_t vidx = vmovq_n_u32(0);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vmovq_n_u32(4), vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vmovq_n_u32(5), vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vmovq_n_u32(6), vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vmovq_n_u32(7), vidx);
const uint32x4_t vm8 = vcgtq_f32(vi8, vmax);
vmax = vbslq_f32(vm8, vi8, vmax);
vidx = vbslq_u32(vm8, vmovq_n_u32(8), vidx);
vst1q_f32(output, vmax); output += 4;
vst1q_u32(index, vidx); index += 4;
}
if (c != 0) {
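      // Remainder of 1-3 channels: load full vectors (XNN_OOB_READS permits
      // reading past the end) and store only the valid lanes.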
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vi8 = vld1q_f32(i8);
float32x4_t vmax = vi0;
uint32x4_t vidx = vmovq_n_u32(0);
const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
vmax = vbslq_f32(vm1, vi1, vmax);
vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);
const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
vmax = vbslq_f32(vm2, vi2, vmax);
vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);
const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
vmax = vbslq_f32(vm3, vi3, vmax);
vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);
const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
vmax = vbslq_f32(vm4, vi4, vmax);
vidx = vbslq_u32(vm4, vmovq_n_u32(4), vidx);
const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
vmax = vbslq_f32(vm5, vi5, vmax);
vidx = vbslq_u32(vm5, vmovq_n_u32(5), vidx);
const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
vmax = vbslq_f32(vm6, vi6, vmax);
vidx = vbslq_u32(vm6, vmovq_n_u32(6), vidx);
const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
vmax = vbslq_f32(vm7, vi7, vmax);
vidx = vbslq_u32(vm7, vmovq_n_u32(7), vidx);
const uint32x4_t vm8 = vcgtq_f32(vi8, vmax);
vmax = vbslq_f32(vm8, vi8, vmax);
vidx = vbslq_u32(vm8, vmovq_n_u32(8), vidx);
float32x2_t vmax_lo = vget_low_f32(vmax);
uint32x2_t vidx_lo = vget_low_u32(vidx);
if (c & 2) {
vst1_f32(output, vmax_lo); output += 2;
vst1_u32(index, vidx_lo); index += 2;
vmax_lo = vget_high_f32(vmax);
vidx_lo = vget_high_u32(vidx);
}
if (c & 1) {
vst1_lane_f32(output, vmax_lo, 0); output += 1;
vst1_lane_u32(index, vidx_lo, 0); index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,976 | 31.134409 | 72 | c |

| XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9x-scalar-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/argmaxpool.h>
#include <xnnpack/math.h>
void xnn_f32_argmaxpool_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment)
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 9);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements < 4) {
i3 = i0;
}
if (pooling_elements <= 4) {
i4 = i0;
}
if (pooling_elements < 6) {
i5 = i0;
}
if (pooling_elements <= 6) {
i6 = i0;
}
if (pooling_elements < 8) {
i7 = i0;
}
if (pooling_elements <= 8) {
i8 = i0;
}
size_t c = channels;
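    // One channel per iteration: scan all 9 rows, tracking the running maximum
    // and the index of the row that produced it.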
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
float vmax = vi0;
uint32_t vidx = 0;
if (vi1 > vmax) {
vmax = vi1;
vidx = 1;
}
if (vi2 > vmax) {
vmax = vi2;
vidx = 2;
}
if (vi3 > vmax) {
vmax = vi3;
vidx = 3;
}
if (vi4 > vmax) {
vmax = vi4;
vidx = 4;
}
if (vi5 > vmax) {
vmax = vi5;
vidx = 5;
}
if (vi6 > vmax) {
vmax = vi6;
vidx = 6;
}
if (vi7 > vmax) {
vmax = vi7;
vidx = 7;
}
if (vi8 > vmax) {
vmax = vi8;
vidx = 8;
}
*output++ = vmax;
*index++ = vidx;
} while (--c != 0);
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,046 | 21.738806 | 72 | c |

| XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9x-sse2-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9x__sse2_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 9);
assert(channels != 0);
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements < 4) {
i3 = i0;
}
if (pooling_elements <= 4) {
i4 = i0;
}
if (pooling_elements < 6) {
i5 = i0;
}
if (pooling_elements <= 6) {
i6 = i0;
}
if (pooling_elements < 8) {
i7 = i0;
}
if (pooling_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
__m128 vmax = vi0;
__m128i vidx = _mm_setzero_si128();
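      // SSE2 has no variable blend instruction, so lane selection is emulated
      // with ANDNOT/AND/OR on the comparison mask.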
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));
const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
vmax = _mm_max_ps(vi8, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));
_mm_storeu_ps(output, vmax);
output += 4;
_mm_storeu_si128((__m128i*) index, vidx);
index += 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vi8 = _mm_loadu_ps(i8);
__m128 vmax = vi0;
__m128i vidx = _mm_setzero_si128();
const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
vmax = _mm_max_ps(vi1, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));
const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
vmax = _mm_max_ps(vi2, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));
const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
vmax = _mm_max_ps(vi3, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));
const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
vmax = _mm_max_ps(vi4, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));
const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
vmax = _mm_max_ps(vi5, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));
const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
vmax = _mm_max_ps(vi6, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));
const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
vmax = _mm_max_ps(vi7, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));
const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
vmax = _mm_max_ps(vi8, vmax);
vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));
if (c & 2) {
_mm_storel_pi((__m64*) output, vmax);
_mm_storel_epi64((__m128i*) index, vidx);
vmax = _mm_movehl_ps(vmax, vmax);
vidx = _mm_unpackhi_epi64(vidx, vidx);
output += 2;
index += 2;
}
if (c & 1) {
_mm_store_ss(output, vmax);
*index = (uint32_t) _mm_cvtsi128_si32(vidx);
output += 1;
index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 6,961 | 33.81 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-argmaxpool/f32-argmaxpool-9x-wasmsimd-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/argmaxpool.h>
void xnn_f32_argmaxpool_ukernel_9x__wasmsimd_c4(
size_t output_pixels,
size_t pooling_elements,
size_t channels,
const float** input,
size_t input_offset,
float* output,
uint32_t* index_ptr,
size_t input_increment,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(pooling_elements != 0);
assert(pooling_elements <= 9);
assert(channels != 0);
float* index = (float*) index_ptr;
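  // Indices are written with full-width v128 stores; the float* type serves
  // only for address arithmetic, the stored bits are uint32 row indices.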
do {
const float* i0 = input[0];
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
i0 = (const float*) ((uintptr_t) i0 + input_offset);
i1 = (const float*) ((uintptr_t) i1 + input_offset);
i2 = (const float*) ((uintptr_t) i2 + input_offset);
i3 = (const float*) ((uintptr_t) i3 + input_offset);
i4 = (const float*) ((uintptr_t) i4 + input_offset);
i5 = (const float*) ((uintptr_t) i5 + input_offset);
i6 = (const float*) ((uintptr_t) i6 + input_offset);
i7 = (const float*) ((uintptr_t) i7 + input_offset);
i8 = (const float*) ((uintptr_t) i8 + input_offset);
if (pooling_elements < 2) {
i1 = i0;
}
if (pooling_elements <= 2) {
i2 = i0;
}
if (pooling_elements < 4) {
i3 = i0;
}
if (pooling_elements <= 4) {
i4 = i0;
}
if (pooling_elements < 6) {
i5 = i0;
}
if (pooling_elements <= 6) {
i6 = i0;
}
if (pooling_elements < 8) {
i7 = i0;
}
if (pooling_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 4; c -= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
v128_t vmax = vi0;
v128_t vidx = wasm_i32x4_const_splat(0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(1), vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(2), vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(3), vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(4), vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(5), vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(6), vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(7), vidx, vm7);
const v128_t vm8 = wasm_f32x4_gt(vi8, vmax);
vmax = wasm_v128_bitselect(vi8, vmax, vm8);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(8), vidx, vm8);
wasm_v128_store(output, vmax);
output += 4;
wasm_v128_store(index, vidx);
index += 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vi8 = wasm_v128_load(i8);
v128_t vmax = vi0;
v128_t vidx = wasm_i32x4_const_splat(0);
const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
vmax = wasm_v128_bitselect(vi1, vmax, vm1);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(1), vidx, vm1);
const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
vmax = wasm_v128_bitselect(vi2, vmax, vm2);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(2), vidx, vm2);
const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
vmax = wasm_v128_bitselect(vi3, vmax, vm3);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(3), vidx, vm3);
const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
vmax = wasm_v128_bitselect(vi4, vmax, vm4);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(4), vidx, vm4);
const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
vmax = wasm_v128_bitselect(vi5, vmax, vm5);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(5), vidx, vm5);
const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
vmax = wasm_v128_bitselect(vi6, vmax, vm6);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(6), vidx, vm6);
const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
vmax = wasm_v128_bitselect(vi7, vmax, vm7);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(7), vidx, vm7);
const v128_t vm8 = wasm_f32x4_gt(vi8, vmax);
vmax = wasm_v128_bitselect(vi8, vmax, vm8);
vidx = wasm_v128_bitselect(wasm_i32x4_const_splat(8), vidx, vm8);
if (c & 2) {
wasm_v128_store64_lane(output, vmax, 0);
wasm_v128_store64_lane(index, vidx, 0);
vmax = wasm_v64x2_shuffle(vmax, vmax, 1, 1);
vidx = wasm_v64x2_shuffle(vidx, vidx, 1, 1);
output += 2;
index += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vmax, 0);
wasm_v128_store32_lane(index, vidx, 0);
output += 1;
index += 1;
}
}
input = (const float**) ((uintptr_t) input + input_increment);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 6,608 | 32.045 | 72 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-neon-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const float32x4_t vscale = vld1q_dup_f32(¶ms->scalar.scale);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
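  // Multipass accumulation: the first pass sums 9 pooling rows into the
  // buffer, each middle pass accumulates 8 more, and the final pass (1-8 rows)
  // adds the remainder, then scales and clamps the sum.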
do {
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
vst1q_f32(b, vsum); b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
vst1q_f32(b, vsum); b += 4;
}
}
assert(k >= 1);
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vacc = vld1q_f32(b); b += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
vst1q_f32(output, vout); output += 4;
c -= 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum01a = vaddq_f32(vsum01, vacc);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum0167a = vaddq_f32(vsum01a, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(output, vout_lo, 0); output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,564 | 33.413681 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-scalar-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
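  // Same multipass schedule as the vector kernels: 9 rows into the buffer
  // first, then blocks of 8, then a final scale-and-clamp pass.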
do {
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
*b++ = vsum;
} while (--c != 0);
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
*b++ = vsum;
} while (--c != 0);
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
float vout = vsum * vscale;
vout = math_max_f32(vout, vmin);
vout = math_min_f32(vout, vmax);
*output++ = vout;
} while (--c != 0);
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 8,092 | 28.753676 | 80 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-sse-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const __m128 vscale = _mm_load_ps(params->sse.scale);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
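  // Input rows use unaligned loads (_mm_loadu_ps); the intermediate buffer is
  // accessed with aligned _mm_load_ps/_mm_store_ps, so it must be 16-byte aligned.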
do {
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum018 = _mm_add_ps(vsum01, vi8);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
_mm_store_ps(b, vsum); b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vacc = _mm_load_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
_mm_store_ps(b, vsum); b += 4;
}
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vacc = _mm_load_ps(b);
b += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
_mm_storeu_ps(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vacc = _mm_load_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum01a = _mm_add_ps(vsum01, vacc);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum0167a = _mm_add_ps(vsum01a, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vout);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,507 | 30.461078 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-wasm-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__wasm_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
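  // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower to the WebAssembly
  // f32.max/f32.min instructions.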
do {
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
*b++ = vsum;
} while (--c != 0);
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
*b++ = vsum;
} while (--c != 0);
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum01a = vsum01 + vacc;
const float vsum2345 = vsum23 + vsum45;
const float vsum0167a = vsum01a + vsum67;
const float vsum = vsum2345 + vsum0167a;
float vout = vsum * vscale;
vout = __builtin_wasm_max_f32(vout, vmin);
vout = __builtin_wasm_min_f32(vout, vmax);
*output++ = vout;
} while (--c != 0);
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 8,110 | 28.819853 | 80 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-wasmsimd-arm-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const v128_t vscale = wasm_v128_load32_splat(¶ms->scalar.scale);
const v128_t vmin = wasm_v128_load32_splat(¶ms->scalar.min);
const v128_t vmax = wasm_v128_load32_splat(¶ms->scalar.max);
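  // The _arm suffix marks tuning for targets where Wasm f32x4.min/f32x4.max
  // (used for the clamp below) lower to single instructions.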
do {
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
wasm_v128_store(b, vsum);
b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
wasm_v128_store(b, vsum);
b += 4;
}
}
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,842 | 31.270833 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9p8x-minmax-wasmsimd-x86-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* buffer,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const v128_t vscale = wasm_v128_load32_splat(¶ms->scalar.scale);
const v128_t vmin = wasm_v128_load32_splat(¶ms->scalar.min);
const v128_t vmax = wasm_v128_load32_splat(¶ms->scalar.max);
do {
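    // First pass: sum the first 9 pooling rows into the scratch buffer.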
{
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
wasm_v128_store(b, vsum);
b += 4;
}
}
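    // Middle passes: accumulate 8 more rows per iteration on top of the buffer.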
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const float* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
wasm_v128_store(b, vsum);
b += 4;
}
}
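    // Last pass: fold in the remaining 1-8 rows, then scale and clamp.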
{
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
input = (const float**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
float* b = buffer;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
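        // pmin/pmax follow x86 MINPS/MAXPS operand semantics and so compile to
        // single instructions there, which is what the x86 variant targets.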
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum01a = wasm_f32x4_add(vsum01, vacc);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum0167a = wasm_f32x4_add(vsum01a, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 10,846 | 31.282738 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-neon-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9x__neon_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const float32x4_t vscale = vld1q_dup_f32(¶ms->scalar.scale);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
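    // Kernels shorter than 9 taps redirect their unused rows to the zero
    // vector, keeping the 9-input sum below branch-free.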
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
while (c >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
vst1q_f32(output, vout); output += 4;
c -= 4;
}
if (c != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vi7 = vld1q_f32(i7);
const float32x4_t vi8 = vld1q_f32(i8);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum67 = vaddq_f32(vi6, vi7);
const float32x4_t vsum018 = vaddq_f32(vsum01, vi8);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum01678 = vaddq_f32(vsum018, vsum67);
const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
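      // Store the 1-3 remainder channels from the low and high vector halves.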
float32x2_t vout_lo = vget_low_f32(vout);
if (c & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (c & 1) {
vst1_lane_f32(output, vout_lo, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,421 | 30.707602 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-scalar-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
void xnn_f32_avgpool_minmax_ukernel_9x__scalar_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
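      // Balanced addition tree: shorter dependency chain than summing the
      // 9 taps sequentially.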
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
float vout = vsum * vscale;
vout = math_max_f32(vout, vmin);
vout = math_min_f32(vout, vmax);
*output++ = vout;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,710 | 26.488889 | 80 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-sse-c4.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9x__sse_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const __m128 vscale = _mm_load_ps(params->sse.scale);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
while (c >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vi7 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vi8 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vsum018 = _mm_add_ps(_mm_add_ps(vi0, vi1), vi8);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
_mm_storeu_ps(output, vout); output += 4;
c -= 4;
}
if (c != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vi7 = _mm_loadu_ps(i7);
const __m128 vi8 = _mm_loadu_ps(i8);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum67 = _mm_add_ps(vi6, vi7);
const __m128 vsum018 = _mm_add_ps(vsum01, vi8);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum01678 = _mm_add_ps(vsum018, vsum67);
const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
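      // Remainder of 1-3 channels: a 64-bit low store, then a scalar store.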
if (c & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vout);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,282 | 28.187845 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-wasm-c1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
void xnn_f32_avgpool_minmax_ukernel_9x__wasm_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vi7 = *i7++;
const float vi8 = *i8++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum67 = vi6 + vi7;
const float vsum018 = vsum01 + vi8;
const float vsum2345 = vsum23 + vsum45;
const float vsum01678 = vsum018 + vsum67;
const float vsum = vsum2345 + vsum01678;
float vout = vsum * vscale;
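      // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower to the single
      // f32.max/f32.min WebAssembly instructions.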
vout = __builtin_wasm_max_f32(vout, vmin);
vout = __builtin_wasm_min_f32(vout, vmax);
*output++ = vout;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 3,728 | 26.622222 | 80 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-wasmsimd-arm-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_arm_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const v128_t vscale = wasm_v128_load32_splat(¶ms->scalar.scale);
const v128_t vmin = wasm_v128_load32_splat(¶ms->scalar.min);
const v128_t vmax = wasm_v128_load32_splat(¶ms->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum018 = wasm_f32x4_add(wasm_f32x4_add(vi0, vi1), vi8);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
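      // IEEE-style f32x4.min/max map to single FMIN/FMAX instructions on ARM,
      // which is what the 'arm' variant of this kernel targets.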
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vi8 = wasm_v128_load(i8);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,498 | 29.214286 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-avgpool/f32-avgpool-9x-minmax-wasmsimd-x86-c4.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/avgpool.h>
void xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_x86_c4(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const float** input,
size_t input_offset,
const float* zero,
float* output,
size_t input_increment,
size_t output_increment,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const v128_t vscale = wasm_v128_load32_splat(¶ms->scalar.scale);
const v128_t vmin = wasm_v128_load32_splat(¶ms->scalar.min);
const v128_t vmax = wasm_v128_load32_splat(¶ms->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
const float* i1 = input[1];
const float* i2 = input[2];
const float* i3 = input[3];
const float* i4 = input[4];
const float* i5 = input[5];
const float* i6 = input[6];
const float* i7 = input[7];
const float* i8 = input[8];
input = (const float**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
while (c >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vi7 = wasm_v128_load(i7);
i7 += 4;
const v128_t vi8 = wasm_v128_load(i8);
i8 += 4;
const v128_t vsum018 = wasm_f32x4_add(wasm_f32x4_add(vi0, vi1), vi8);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
c -= 4;
}
if (c != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vi7 = wasm_v128_load(i7);
const v128_t vi8 = wasm_v128_load(i8);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum67 = wasm_f32x4_add(vi6, vi7);
const v128_t vsum018 = wasm_f32x4_add(vsum01, vi8);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum01678 = wasm_f32x4_add(vsum018, vsum67);
const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (c & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 5,502 | 29.236264 | 94 | c |

| XNNPACK | XNNPACK-master/src/f32-conv-hwc/f32-conv-hwc-3x3s2p0p1c3x4-scalar-1x1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/conv.h>
#include <xnnpack/math.h>
void xnn_f32_conv_hwc_ukernel_3x3s2p0p1c3x4__scalar_1x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_width_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
const size_t input_width_decrement = round_down_po2(input_width - 1, 2) * 3 /* channels */ * sizeof(float);
const size_t output_width = input_width / 2;
const size_t output_channel_decrement = output_width * output_width_stride - 4 * sizeof(float);
const size_t output_height_increment = output_height_stride - round_up_po2(output_channels, 4) * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input + input_height_stride * (output_y_start * 2 - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
float* o0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
const float voutput_max = params->scalar.max;
const float voutput_min = params->scalar.min;
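  // Each iteration produces one output row from three consecutive input rows
  // (vertical stride 2); i2 is redirected to zero past the bottom edge.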
for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 1) {
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
const float* w = weights;
size_t c = output_channels;
do {
float vi00c0 = i0[0];
float vi00c1 = i0[1];
float vi00c2 = i0[2];
float vi10c0 = i1[0];
float vi10c1 = i1[1];
float vi10c2 = i1[2];
float vi20c0 = i2[0];
float vi20c1 = i2[1];
float vi20c2 = i2[2];
size_t iw = input_width - 1;
for (; iw >= 2; iw -= 2) {
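        // Each iteration computes one output pixel (up to 4 channels) from a
        // 3-pixel-wide window, then slides right by 2 input pixels (stride 2);
        // the window's right column becomes the next window's left column.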
// start with biases
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[3];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[3];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[3];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[4];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[4];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[4];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[5];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[5];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[5];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
const float vk02c0x0 = w[76];
const float vk02c0x1 = w[77];
const float vk02c0x2 = w[78];
const float vk02c0x3 = w[79];
const float vi02c0 = i0[6];
voc0 += vk02c0x0 * vi02c0;
voc1 += vk02c0x1 * vi02c0;
voc2 += vk02c0x2 * vi02c0;
voc3 += vk02c0x3 * vi02c0;
const float vk12c0x0 = w[80];
const float vk12c0x1 = w[81];
const float vk12c0x2 = w[82];
const float vk12c0x3 = w[83];
const float vi12c0 = i1[6];
voc0 += vk12c0x0 * vi12c0;
voc1 += vk12c0x1 * vi12c0;
voc2 += vk12c0x2 * vi12c0;
voc3 += vk12c0x3 * vi12c0;
const float vk22c0x0 = w[84];
const float vk22c0x1 = w[85];
const float vk22c0x2 = w[86];
const float vk22c0x3 = w[87];
const float vi22c0 = i2[6];
voc0 += vk22c0x0 * vi22c0;
voc1 += vk22c0x1 * vi22c0;
voc2 += vk22c0x2 * vi22c0;
voc3 += vk22c0x3 * vi22c0;
vi00c0 = vi02c0;
vi10c0 = vi12c0;
vi20c0 = vi22c0;
const float vk02c1x0 = w[88];
const float vk02c1x1 = w[89];
const float vk02c1x2 = w[90];
const float vk02c1x3 = w[91];
const float vi02c1 = i0[7];
voc0 += vk02c1x0 * vi02c1;
voc1 += vk02c1x1 * vi02c1;
voc2 += vk02c1x2 * vi02c1;
voc3 += vk02c1x3 * vi02c1;
const float vk12c1x0 = w[92];
const float vk12c1x1 = w[93];
const float vk12c1x2 = w[94];
const float vk12c1x3 = w[95];
const float vi12c1 = i1[7];
voc0 += vk12c1x0 * vi12c1;
voc1 += vk12c1x1 * vi12c1;
voc2 += vk12c1x2 * vi12c1;
voc3 += vk12c1x3 * vi12c1;
const float vk22c1x0 = w[96];
const float vk22c1x1 = w[97];
const float vk22c1x2 = w[98];
const float vk22c1x3 = w[99];
const float vi22c1 = i2[7];
voc0 += vk22c1x0 * vi22c1;
voc1 += vk22c1x1 * vi22c1;
voc2 += vk22c1x2 * vi22c1;
voc3 += vk22c1x3 * vi22c1;
vi00c1 = vi02c1;
vi10c1 = vi12c1;
vi20c1 = vi22c1;
const float vk02c2x0 = w[100];
const float vk02c2x1 = w[101];
const float vk02c2x2 = w[102];
const float vk02c2x3 = w[103];
const float vi02c2 = i0[8];
voc0 += vk02c2x0 * vi02c2;
voc1 += vk02c2x1 * vi02c2;
voc2 += vk02c2x2 * vi02c2;
voc3 += vk02c2x3 * vi02c2;
const float vk12c2x0 = w[104];
const float vk12c2x1 = w[105];
const float vk12c2x2 = w[106];
const float vk12c2x3 = w[107];
const float vi12c2 = i1[8];
voc0 += vk12c2x0 * vi12c2;
voc1 += vk12c2x1 * vi12c2;
voc2 += vk12c2x2 * vi12c2;
voc3 += vk12c2x3 * vi12c2;
const float vk22c2x0 = w[108];
const float vk22c2x1 = w[109];
const float vk22c2x2 = w[110];
const float vk22c2x3 = w[111];
const float vi22c2 = i2[8];
voc0 += vk22c2x0 * vi22c2;
voc1 += vk22c2x1 * vi22c2;
voc2 += vk22c2x2 * vi22c2;
voc3 += vk22c2x3 * vi22c2;
vi00c2 = vi02c2;
vi10c2 = vi12c2;
vi20c2 = vi22c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
if XNN_LIKELY(c >= 4) {
o0[0] = voc0;
o0[1] = voc1;
o0[2] = voc2;
o0[3] = voc3;
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
if (c & 2) {
o0_tmp[0] = voc0;
o0_tmp[1] = voc1;
o0_tmp += 2;
voc0 = voc2;
}
if (c & 1) {
*o0_tmp++ = voc0;
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
}
i0 += 6;
i1 += 6;
i2 += 6;
}
assert(iw < 2);
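      // Trailing output pixel: its rightmost window column falls into the
      // implicit right padding, so only the left two columns are accumulated.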
if XNN_UNLIKELY(iw != 0) {
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[3];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[3];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[3];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[4];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[4];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[4];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[5];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[5];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[5];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
if XNN_LIKELY(c >= 4) {
o0[0] = voc0;
o0[1] = voc1;
o0[2] = voc2;
o0[3] = voc3;
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
if (c & 2) {
o0_tmp[0] = voc0;
o0_tmp[1] = voc1;
o0_tmp += 2;
voc0 = voc2;
}
if (c & 1) {
*o0_tmp++ = voc0;
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
}
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels
o0 = (float*) ((uintptr_t) o0 - output_channel_decrement);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
w += 112;
c = doz(c, 4);
} while (c != 0);
// Move output pointers back to the position of the first channel, and forward to the next block of rows
o0 = (float*) ((uintptr_t) o0 + output_height_increment);
// Move input pointers forward to the next row
i0 = i2;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
}
}
| 20,103 | 28.391813 | 120 | c |

| XNNPACK | XNNPACK-master/src/f32-conv-hwc/f32-conv-hwc-3x3s2p1c3x4-scalar-1x1.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/conv.h>
#include <xnnpack/math.h>
void xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__scalar_1x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_width_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
const size_t input_width_decrement = round_down_po2(input_width, 2) * 3 /* channels */ * sizeof(float);
const size_t output_width = (input_width + 1) / 2;
const size_t output_channel_decrement = output_width * output_width_stride - 4 * sizeof(float);
const size_t output_height_increment = output_height_stride - round_up_po2(output_channels, 4) * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input + input_height_stride * (output_y_start * 2 - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
float* o0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
const float voutput_max = params->scalar.max;
const float voutput_min = params->scalar.min;
for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 1) {
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
const float* w = weights;
size_t c = output_channels;
do {
float vi00c0 = 0.0f;
float vi00c1 = 0.0f;
float vi00c2 = 0.0f;
float vi10c0 = 0.0f;
float vi10c1 = 0.0f;
float vi10c2 = 0.0f;
float vi20c0 = 0.0f;
float vi20c1 = 0.0f;
float vi20c2 = 0.0f;
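      // With one column of left padding, the first window's left column is
      // all zeros; later iterations refill it from the previous window.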
size_t iw = input_width;
for (; iw >= 2; iw -= 2) {
// start with biases
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[0];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[0];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[0];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[1];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[1];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[1];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[2];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[2];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[2];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
const float vk02c0x0 = w[76];
const float vk02c0x1 = w[77];
const float vk02c0x2 = w[78];
const float vk02c0x3 = w[79];
const float vi02c0 = i0[3];
voc0 += vk02c0x0 * vi02c0;
voc1 += vk02c0x1 * vi02c0;
voc2 += vk02c0x2 * vi02c0;
voc3 += vk02c0x3 * vi02c0;
const float vk12c0x0 = w[80];
const float vk12c0x1 = w[81];
const float vk12c0x2 = w[82];
const float vk12c0x3 = w[83];
const float vi12c0 = i1[3];
voc0 += vk12c0x0 * vi12c0;
voc1 += vk12c0x1 * vi12c0;
voc2 += vk12c0x2 * vi12c0;
voc3 += vk12c0x3 * vi12c0;
const float vk22c0x0 = w[84];
const float vk22c0x1 = w[85];
const float vk22c0x2 = w[86];
const float vk22c0x3 = w[87];
const float vi22c0 = i2[3];
voc0 += vk22c0x0 * vi22c0;
voc1 += vk22c0x1 * vi22c0;
voc2 += vk22c0x2 * vi22c0;
voc3 += vk22c0x3 * vi22c0;
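        // Horizontal stride 2: column 2 of this window is column 0 of the next
        // one, so carry its three channel values over instead of reloading them.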
vi00c0 = vi02c0;
vi10c0 = vi12c0;
vi20c0 = vi22c0;
const float vk02c1x0 = w[88];
const float vk02c1x1 = w[89];
const float vk02c1x2 = w[90];
const float vk02c1x3 = w[91];
const float vi02c1 = i0[4];
voc0 += vk02c1x0 * vi02c1;
voc1 += vk02c1x1 * vi02c1;
voc2 += vk02c1x2 * vi02c1;
voc3 += vk02c1x3 * vi02c1;
const float vk12c1x0 = w[92];
const float vk12c1x1 = w[93];
const float vk12c1x2 = w[94];
const float vk12c1x3 = w[95];
const float vi12c1 = i1[4];
voc0 += vk12c1x0 * vi12c1;
voc1 += vk12c1x1 * vi12c1;
voc2 += vk12c1x2 * vi12c1;
voc3 += vk12c1x3 * vi12c1;
const float vk22c1x0 = w[96];
const float vk22c1x1 = w[97];
const float vk22c1x2 = w[98];
const float vk22c1x3 = w[99];
const float vi22c1 = i2[4];
voc0 += vk22c1x0 * vi22c1;
voc1 += vk22c1x1 * vi22c1;
voc2 += vk22c1x2 * vi22c1;
voc3 += vk22c1x3 * vi22c1;
vi00c1 = vi02c1;
vi10c1 = vi12c1;
vi20c1 = vi22c1;
const float vk02c2x0 = w[100];
const float vk02c2x1 = w[101];
const float vk02c2x2 = w[102];
const float vk02c2x3 = w[103];
const float vi02c2 = i0[5];
voc0 += vk02c2x0 * vi02c2;
voc1 += vk02c2x1 * vi02c2;
voc2 += vk02c2x2 * vi02c2;
voc3 += vk02c2x3 * vi02c2;
const float vk12c2x0 = w[104];
const float vk12c2x1 = w[105];
const float vk12c2x2 = w[106];
const float vk12c2x3 = w[107];
const float vi12c2 = i1[5];
voc0 += vk12c2x0 * vi12c2;
voc1 += vk12c2x1 * vi12c2;
voc2 += vk12c2x2 * vi12c2;
voc3 += vk12c2x3 * vi12c2;
const float vk22c2x0 = w[108];
const float vk22c2x1 = w[109];
const float vk22c2x2 = w[110];
const float vk22c2x3 = w[111];
const float vi22c2 = i2[5];
voc0 += vk22c2x0 * vi22c2;
voc1 += vk22c2x1 * vi22c2;
voc2 += vk22c2x2 * vi22c2;
voc3 += vk22c2x3 * vi22c2;
vi00c2 = vi02c2;
vi10c2 = vi12c2;
vi20c2 = vi22c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
if XNN_LIKELY(c >= 4) {
o0[0] = voc0;
o0[1] = voc1;
o0[2] = voc2;
o0[3] = voc3;
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
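          // Partial store for a final group of fewer than 4 output channels.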
float* o0_tmp = o0;
if (c & 2) {
o0_tmp[0] = voc0;
o0_tmp[1] = voc1;
o0_tmp += 2;
voc0 = voc2;
}
if (c & 1) {
*o0_tmp++ = voc0;
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
}
i0 += 6;
i1 += 6;
i2 += 6;
}
assert(iw < 2);
if XNN_UNLIKELY(iw != 0) {
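        // Right edge: a trailing odd column forms a window whose third column
        // lies outside the image, so the x=2 taps are simply skipped.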
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[0];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[0];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[0];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[1];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[1];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[1];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[2];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[2];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[2];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
if XNN_LIKELY(c >= 4) {
o0[0] = voc0;
o0[1] = voc1;
o0[2] = voc2;
o0[3] = voc3;
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
if (c & 2) {
o0_tmp[0] = voc0;
o0_tmp[1] = voc1;
o0_tmp += 2;
voc0 = voc2;
}
if (c & 1) {
*o0_tmp++ = voc0;
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
}
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels
o0 = (float*) ((uintptr_t) o0 - output_channel_decrement);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
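      // (112 floats per block: 4 biases + 3x3 taps x 3 input channels x 4 output channels)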
w += 112;
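      // doz() is a saturating (difference-or-zero) subtraction, so c stops at 0
      // after the final partial group of output channels.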
c = doz(c, 4);
} while (c != 0);
    // Move the output pointer back to the first channel, and forward to the next output row
o0 = (float*) ((uintptr_t) o0 + output_height_increment);
    // Advance input pointers by two rows (vertical stride 2) to the window of the next output row
i0 = i2;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
}
}
| 20,090 | 28.372807 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-conv-hwc/gen/f32-conv-hwc-3x3s2p1c3x4-aarch64-neonfma-2x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-conv-hwc/3x3s2p1c3-neon-x1.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/conv.h>
#include <xnnpack/math.h>
void xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__aarch64_neonfma_2x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_width_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
const size_t input_width_decrement = input_width * 3 /* channels */ * sizeof(float);
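  // Stride 2 with one column of left padding yields ceil(input_width / 2) output pixels per row.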
const size_t output_width = (input_width + 1) / 2;
const size_t output_channel_decrement = output_width * output_width_stride - 4 * sizeof(float);
const size_t output_height_increment = output_height_stride * 2 - round_up_po2(output_channels, 4) * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input +
input_height_stride * (output_y_start * 2 /* vertical stride */ - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
float* o0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
float* o1 = (float*) ((uintptr_t) o0 + output_height_stride);
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 2) {
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
const size_t input_y4 = input_y2 + 2;
if XNN_UNPREDICTABLE(input_y2 > input_height) {
i1 = zero;
}
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
if XNN_UNPREDICTABLE(input_y4 > input_height) {
i3 = zero;
}
if XNN_UNPREDICTABLE(input_y4 >= input_height) {
i4 = zero;
}
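    // If only one output row remains, alias o1 to o0: row-1 results are stored
    // first and then overwritten by the valid row-0 stores below.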
if XNN_UNPREDICTABLE(output_y + 2 > output_y_end) {
o1 = o0;
}
const float* w = weights;
size_t c = output_channels;
do {
// viMx0 = ( iM0c2, iM0c1, iM0c0, --- )
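      // Lane 0 is unused; lanes 1-3 hold channels 0-2 of the rightmost pixel of
      // the previous window (zeros on the first iteration, i.e. left padding).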
float32x4_t vi0x0 = vmovq_n_f32(0.0f);
float32x4_t vi1x0 = vmovq_n_f32(0.0f);
float32x4_t vi2x0 = vmovq_n_f32(0.0f);
float32x4_t vi3x0 = vmovq_n_f32(0.0f);
float32x4_t vi4x0 = vmovq_n_f32(0.0f);
size_t iw = input_width;
for (; iw >= 2; iw -= 2) {
float32x4_t vo0c0123 = vld1q_f32(w);
float32x4_t vo1c0123 = vo0c0123;
const float32x4_t vk00c0x0123 = vld1q_f32(w + 4);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c0x0123, vget_low_f32(vi0x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c0x0123, vget_low_f32(vi2x0), 1);
const float32x4_t vk10c0x0123 = vld1q_f32(w + 8);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c0x0123, vget_low_f32(vi1x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c0x0123, vget_low_f32(vi3x0), 1);
const float32x4_t vk20c0x0123 = vld1q_f32(w + 12);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c0x0123, vget_low_f32(vi2x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c0x0123, vget_low_f32(vi4x0), 1);
const float32x4_t vk00c1x0123 = vld1q_f32(w + 16);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c1x0123, vget_high_f32(vi0x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c1x0123, vget_high_f32(vi2x0), 0);
const float32x4_t vk10c1x0123 = vld1q_f32(w + 20);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c1x0123, vget_high_f32(vi1x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c1x0123, vget_high_f32(vi3x0), 0);
const float32x4_t vk20c1x0123 = vld1q_f32(w + 24);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c1x0123, vget_high_f32(vi2x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c1x0123, vget_high_f32(vi4x0), 0);
const float32x4_t vk00c2x0123 = vld1q_f32(w + 28);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c2x0123, vget_high_f32(vi0x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c2x0123, vget_high_f32(vi2x0), 1);
const float32x4_t vk10c2x0123 = vld1q_f32(w + 32);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c2x0123, vget_high_f32(vi1x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c2x0123, vget_high_f32(vi3x0), 1);
const float32x4_t vk20c2x0123 = vld1q_f32(w + 36);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c2x0123, vget_high_f32(vi2x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c2x0123, vget_high_f32(vi4x0), 1);
// viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
const float32x4_t vi0x1 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x1 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x1 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x1 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk01c0x0123 = vld1q_f32(w + 40);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c0x0123, vget_low_f32(vi0x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c0x0123, vget_low_f32(vi2x1), 0);
const float32x4_t vk11c0x0123 = vld1q_f32(w + 44);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c0x0123, vget_low_f32(vi1x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c0x0123, vget_low_f32(vi3x1), 0);
const float32x4_t vk21c0x0123 = vld1q_f32(w + 48);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c0x0123, vget_low_f32(vi2x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c0x0123, vget_low_f32(vi4x1), 0);
const float32x4_t vk01c1x0123 = vld1q_f32(w + 52);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c1x0123, vget_low_f32(vi0x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c1x0123, vget_low_f32(vi2x1), 1);
const float32x4_t vk11c1x0123 = vld1q_f32(w + 56);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c1x0123, vget_low_f32(vi1x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c1x0123, vget_low_f32(vi3x1), 1);
const float32x4_t vk21c1x0123 = vld1q_f32(w + 60);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c1x0123, vget_low_f32(vi2x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c1x0123, vget_low_f32(vi4x1), 1);
const float32x4_t vk01c2x0123 = vld1q_f32(w + 64);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c2x0123, vget_high_f32(vi0x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c2x0123, vget_high_f32(vi2x1), 0);
const float32x4_t vk11c2x0123 = vld1q_f32(w + 68);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c2x0123, vget_high_f32(vi1x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c2x0123, vget_high_f32(vi3x1), 0);
const float32x4_t vk21c2x0123 = vld1q_f32(w + 72);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c2x0123, vget_high_f32(vi2x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c2x0123, vget_high_f32(vi4x1), 0);
const float32x4_t vk02c0x0123 = vld1q_f32(w + 76);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk02c0x0123, vget_high_f32(vi0x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk02c0x0123, vget_high_f32(vi2x1), 1);
const float32x4_t vk12c0x0123 = vld1q_f32(w + 80);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk12c0x0123, vget_high_f32(vi1x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk12c0x0123, vget_high_f32(vi3x1), 1);
const float32x4_t vk22c0x0123 = vld1q_f32(w + 84);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk22c0x0123, vget_high_f32(vi2x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk22c0x0123, vget_high_f32(vi4x1), 1);
// viMx2 = ( iM2c2, iM2c1 )
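      // Together viMx1 (4 floats) and viMx2 (2 floats) supply the 6 new values
      // (2 pixels x 3 channels) consumed per loop iteration.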
const float32x2_t vi0x2 = vld1_f32(i0); i0 += 2;
const float32x2_t vi1x2 = vld1_f32(i1); i1 += 2;
const float32x2_t vi2x2 = vld1_f32(i2); i2 += 2;
const float32x2_t vi3x2 = vld1_f32(i3); i3 += 2;
const float32x2_t vi4x2 = vld1_f32(i4); i4 += 2;
const float32x4_t vk02c1x0123 = vld1q_f32(w + 88);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk02c1x0123, vi0x2, 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk02c1x0123, vi2x2, 0);
const float32x4_t vk12c1x0123 = vld1q_f32(w + 92);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk12c1x0123, vi1x2, 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk12c1x0123, vi3x2, 0);
const float32x4_t vk22c1x0123 = vld1q_f32(w + 96);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk22c1x0123, vi2x2, 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk22c1x0123, vi4x2, 0);
const float32x4_t vk02c2x0123 = vld1q_f32(w + 100);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk02c2x0123, vi0x2, 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk02c2x0123, vi2x2, 1);
const float32x4_t vk12c2x0123 = vld1q_f32(w + 104);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk12c2x0123, vi1x2, 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk12c2x0123, vi3x2, 1);
const float32x4_t vk22c2x0123 = vld1q_f32(w + 108);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk22c2x0123, vi2x2, 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk22c2x0123, vi4x2, 1);
vi0x0 = vcombine_f32(vget_high_f32(vi0x1), vi0x2);
vi1x0 = vcombine_f32(vget_high_f32(vi1x1), vi1x2);
vi2x0 = vcombine_f32(vget_high_f32(vi2x1), vi2x2);
vi3x0 = vcombine_f32(vget_high_f32(vi3x1), vi3x2);
vi4x0 = vcombine_f32(vget_high_f32(vi4x1), vi4x2);
vo0c0123 = vmaxq_f32(vo0c0123, vmin);
vo1c0123 = vmaxq_f32(vo1c0123, vmin);
vo0c0123 = vminq_f32(vo0c0123, vmax);
vo1c0123 = vminq_f32(vo1c0123, vmax);
if XNN_LIKELY(c >= 4) {
vst1q_f32(o1, vo1c0123);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
vst1q_f32(o0, vo0c0123);
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
float* o1_tmp = o1;
float32x2_t vo0c01 = vget_low_f32(vo0c0123);
float32x2_t vo1c01 = vget_low_f32(vo1c0123);
if (c & 2) {
vst1_f32(o1_tmp, vo1c01); o1_tmp += 2;
vo1c01 = vget_high_f32(vo1c0123);
vst1_f32(o0_tmp, vo0c01); o0_tmp += 2;
vo0c01 = vget_high_f32(vo0c0123);
}
if (c & 1) {
vst1_lane_f32(o1_tmp, vo1c01, 0);
vst1_lane_f32(o0_tmp, vo0c01, 0);
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
}
}
assert(iw < 2);
if XNN_UNLIKELY(iw & 1) {
float32x4_t vo0c0123 = vld1q_f32(w);
float32x4_t vo1c0123 = vo0c0123;
const float32x4_t vk00c0x0123 = vld1q_f32(w + 4);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c0x0123, vget_low_f32(vi0x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c0x0123, vget_low_f32(vi2x0), 1);
const float32x4_t vk10c0x0123 = vld1q_f32(w + 8);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c0x0123, vget_low_f32(vi1x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c0x0123, vget_low_f32(vi3x0), 1);
const float32x4_t vk20c0x0123 = vld1q_f32(w + 12);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c0x0123, vget_low_f32(vi2x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c0x0123, vget_low_f32(vi4x0), 1);
const float32x4_t vk00c1x0123 = vld1q_f32(w + 16);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c1x0123, vget_high_f32(vi0x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c1x0123, vget_high_f32(vi2x0), 0);
const float32x4_t vk10c1x0123 = vld1q_f32(w + 20);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c1x0123, vget_high_f32(vi1x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c1x0123, vget_high_f32(vi3x0), 0);
const float32x4_t vk20c1x0123 = vld1q_f32(w + 24);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c1x0123, vget_high_f32(vi2x0), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c1x0123, vget_high_f32(vi4x0), 0);
const float32x4_t vk00c2x0123 = vld1q_f32(w + 28);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk00c2x0123, vget_high_f32(vi0x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk00c2x0123, vget_high_f32(vi2x0), 1);
const float32x4_t vk10c2x0123 = vld1q_f32(w + 32);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk10c2x0123, vget_high_f32(vi1x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk10c2x0123, vget_high_f32(vi3x0), 1);
const float32x4_t vk20c2x0123 = vld1q_f32(w + 36);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk20c2x0123, vget_high_f32(vi2x0), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk20c2x0123, vget_high_f32(vi4x0), 1);
// viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
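        // This 4-float load reads one float past the last pixel of the row; the
        // kernel is declared XNN_OOB_READS, and the extra lane is never used.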
const float32x4_t vi0x1 = vld1q_f32(i0); i0 += 3;
const float32x4_t vi1x1 = vld1q_f32(i1); i1 += 3;
const float32x4_t vi2x1 = vld1q_f32(i2); i2 += 3;
const float32x4_t vi3x1 = vld1q_f32(i3); i3 += 3;
const float32x4_t vi4x1 = vld1q_f32(i4); i4 += 3;
const float32x4_t vk01c0x0123 = vld1q_f32(w + 40);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c0x0123, vget_low_f32(vi0x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c0x0123, vget_low_f32(vi2x1), 0);
const float32x4_t vk11c0x0123 = vld1q_f32(w + 44);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c0x0123, vget_low_f32(vi1x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c0x0123, vget_low_f32(vi3x1), 0);
const float32x4_t vk21c0x0123 = vld1q_f32(w + 48);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c0x0123, vget_low_f32(vi2x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c0x0123, vget_low_f32(vi4x1), 0);
const float32x4_t vk01c1x0123 = vld1q_f32(w + 52);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c1x0123, vget_low_f32(vi0x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c1x0123, vget_low_f32(vi2x1), 1);
const float32x4_t vk11c1x0123 = vld1q_f32(w + 56);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c1x0123, vget_low_f32(vi1x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c1x0123, vget_low_f32(vi3x1), 1);
const float32x4_t vk21c1x0123 = vld1q_f32(w + 60);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c1x0123, vget_low_f32(vi2x1), 1);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c1x0123, vget_low_f32(vi4x1), 1);
const float32x4_t vk01c2x0123 = vld1q_f32(w + 64);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk01c2x0123, vget_high_f32(vi0x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk01c2x0123, vget_high_f32(vi2x1), 0);
const float32x4_t vk11c2x0123 = vld1q_f32(w + 68);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk11c2x0123, vget_high_f32(vi1x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk11c2x0123, vget_high_f32(vi3x1), 0);
const float32x4_t vk21c2x0123 = vld1q_f32(w + 72);
vo0c0123 = vfmaq_lane_f32(vo0c0123, vk21c2x0123, vget_high_f32(vi2x1), 0);
vo1c0123 = vfmaq_lane_f32(vo1c0123, vk21c2x0123, vget_high_f32(vi4x1), 0);
vo0c0123 = vmaxq_f32(vo0c0123, vmin);
vo1c0123 = vmaxq_f32(vo1c0123, vmin);
vo0c0123 = vminq_f32(vo0c0123, vmax);
vo1c0123 = vminq_f32(vo1c0123, vmax);
if XNN_LIKELY(c >= 4) {
vst1q_f32(o1, vo1c0123);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
vst1q_f32(o0, vo0c0123);
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
float* o1_tmp = o1;
float32x2_t vo0c01 = vget_low_f32(vo0c0123);
float32x2_t vo1c01 = vget_low_f32(vo1c0123);
if (c & 2) {
vst1_f32(o1_tmp, vo1c01); o1_tmp += 2;
vo1c01 = vget_high_f32(vo1c0123);
vst1_f32(o0_tmp, vo0c01); o0_tmp += 2;
vo0c01 = vget_high_f32(vo0c0123);
}
if (c & 1) {
vst1_lane_f32(o1_tmp, vo1c01, 0);
vst1_lane_f32(o0_tmp, vo0c01, 0);
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
}
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels
o0 = (float*) ((uintptr_t) o0 - output_channel_decrement);
o1 = (float*) ((uintptr_t) o1 - output_channel_decrement);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
i3 = (const float*) ((uintptr_t) i3 - input_width_decrement);
i4 = (const float*) ((uintptr_t) i4 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
w += 112;
c = doz(c, 4);
} while (c != 0);
// Move output pointers back to the position of the first channel, and forward to the next block of rows
o0 = (float*) ((uintptr_t) o0 + output_height_increment);
o1 = (float*) ((uintptr_t) o1 + output_height_increment);
    // Advance input pointers by four rows, to the windows of the next two output rows
i0 = i4;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
}
}
| 18,379 | 40.678005 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-conv-hwc/gen/f32-conv-hwc-3x3s2p1c3x4-neon-2x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-conv-hwc/3x3s2p1c3-neon-x1.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/conv.h>
#include <xnnpack/math.h>
void xnn_f32_conv_hwc_ukernel_3x3s2p1c3x4__neon_2x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_width_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
const size_t input_width_decrement = input_width * 3 /* channels */ * sizeof(float);
const size_t output_width = (input_width + 1) / 2;
const size_t output_channel_decrement = output_width * output_width_stride - 4 * sizeof(float);
const size_t output_height_increment = output_height_stride * 2 - round_up_po2(output_channels, 4) * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input +
input_height_stride * (output_y_start * 2 /* vertical stride */ - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
float* o0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
float* o1 = (float*) ((uintptr_t) o0 + output_height_stride);
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
  // The clamp limits are loop-invariant; load them once, as in the aarch64 variant.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 2) {
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
const size_t input_y4 = input_y2 + 2;
if XNN_UNPREDICTABLE(input_y2 > input_height) {
i1 = zero;
}
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
if XNN_UNPREDICTABLE(input_y4 > input_height) {
i3 = zero;
}
if XNN_UNPREDICTABLE(input_y4 >= input_height) {
i4 = zero;
}
if XNN_UNPREDICTABLE(output_y + 2 > output_y_end) {
o1 = o0;
}
const float* w = weights;
size_t c = output_channels;
do {
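      // Same schedule as the aarch64_neonfma variant, but with vmlaq_lane_f32
      // (separate multiply and add) instead of the fused vfmaq_lane_f32.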
// viMx0 = ( iM0c2, iM0c1, iM0c0, --- )
float32x4_t vi0x0 = vmovq_n_f32(0.0f);
float32x4_t vi1x0 = vmovq_n_f32(0.0f);
float32x4_t vi2x0 = vmovq_n_f32(0.0f);
float32x4_t vi3x0 = vmovq_n_f32(0.0f);
float32x4_t vi4x0 = vmovq_n_f32(0.0f);
size_t iw = input_width;
for (; iw >= 2; iw -= 2) {
float32x4_t vo0c0123 = vld1q_f32(w);
float32x4_t vo1c0123 = vo0c0123;
const float32x4_t vk00c0x0123 = vld1q_f32(w + 4);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c0x0123, vget_low_f32(vi0x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c0x0123, vget_low_f32(vi2x0), 1);
const float32x4_t vk10c0x0123 = vld1q_f32(w + 8);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c0x0123, vget_low_f32(vi1x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c0x0123, vget_low_f32(vi3x0), 1);
const float32x4_t vk20c0x0123 = vld1q_f32(w + 12);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c0x0123, vget_low_f32(vi2x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c0x0123, vget_low_f32(vi4x0), 1);
const float32x4_t vk00c1x0123 = vld1q_f32(w + 16);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c1x0123, vget_high_f32(vi0x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c1x0123, vget_high_f32(vi2x0), 0);
const float32x4_t vk10c1x0123 = vld1q_f32(w + 20);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c1x0123, vget_high_f32(vi1x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c1x0123, vget_high_f32(vi3x0), 0);
const float32x4_t vk20c1x0123 = vld1q_f32(w + 24);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c1x0123, vget_high_f32(vi2x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c1x0123, vget_high_f32(vi4x0), 0);
const float32x4_t vk00c2x0123 = vld1q_f32(w + 28);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c2x0123, vget_high_f32(vi0x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c2x0123, vget_high_f32(vi2x0), 1);
const float32x4_t vk10c2x0123 = vld1q_f32(w + 32);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c2x0123, vget_high_f32(vi1x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c2x0123, vget_high_f32(vi3x0), 1);
const float32x4_t vk20c2x0123 = vld1q_f32(w + 36);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c2x0123, vget_high_f32(vi2x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c2x0123, vget_high_f32(vi4x0), 1);
// viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
const float32x4_t vi0x1 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1x1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2x1 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3x1 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4x1 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk01c0x0123 = vld1q_f32(w + 40);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c0x0123, vget_low_f32(vi0x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c0x0123, vget_low_f32(vi2x1), 0);
const float32x4_t vk11c0x0123 = vld1q_f32(w + 44);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c0x0123, vget_low_f32(vi1x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c0x0123, vget_low_f32(vi3x1), 0);
const float32x4_t vk21c0x0123 = vld1q_f32(w + 48);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c0x0123, vget_low_f32(vi2x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c0x0123, vget_low_f32(vi4x1), 0);
const float32x4_t vk01c1x0123 = vld1q_f32(w + 52);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c1x0123, vget_low_f32(vi0x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c1x0123, vget_low_f32(vi2x1), 1);
const float32x4_t vk11c1x0123 = vld1q_f32(w + 56);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c1x0123, vget_low_f32(vi1x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c1x0123, vget_low_f32(vi3x1), 1);
const float32x4_t vk21c1x0123 = vld1q_f32(w + 60);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c1x0123, vget_low_f32(vi2x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c1x0123, vget_low_f32(vi4x1), 1);
const float32x4_t vk01c2x0123 = vld1q_f32(w + 64);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c2x0123, vget_high_f32(vi0x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c2x0123, vget_high_f32(vi2x1), 0);
const float32x4_t vk11c2x0123 = vld1q_f32(w + 68);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c2x0123, vget_high_f32(vi1x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c2x0123, vget_high_f32(vi3x1), 0);
const float32x4_t vk21c2x0123 = vld1q_f32(w + 72);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c2x0123, vget_high_f32(vi2x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c2x0123, vget_high_f32(vi4x1), 0);
const float32x4_t vk02c0x0123 = vld1q_f32(w + 76);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk02c0x0123, vget_high_f32(vi0x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk02c0x0123, vget_high_f32(vi2x1), 1);
const float32x4_t vk12c0x0123 = vld1q_f32(w + 80);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk12c0x0123, vget_high_f32(vi1x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk12c0x0123, vget_high_f32(vi3x1), 1);
const float32x4_t vk22c0x0123 = vld1q_f32(w + 84);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk22c0x0123, vget_high_f32(vi2x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk22c0x0123, vget_high_f32(vi4x1), 1);
// viMx2 = ( iM2c2, iM2c1 )
const float32x2_t vi0x2 = vld1_f32(i0); i0 += 2;
const float32x2_t vi1x2 = vld1_f32(i1); i1 += 2;
const float32x2_t vi2x2 = vld1_f32(i2); i2 += 2;
const float32x2_t vi3x2 = vld1_f32(i3); i3 += 2;
const float32x2_t vi4x2 = vld1_f32(i4); i4 += 2;
const float32x4_t vk02c1x0123 = vld1q_f32(w + 88);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk02c1x0123, vi0x2, 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk02c1x0123, vi2x2, 0);
const float32x4_t vk12c1x0123 = vld1q_f32(w + 92);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk12c1x0123, vi1x2, 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk12c1x0123, vi3x2, 0);
const float32x4_t vk22c1x0123 = vld1q_f32(w + 96);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk22c1x0123, vi2x2, 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk22c1x0123, vi4x2, 0);
const float32x4_t vk02c2x0123 = vld1q_f32(w + 100);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk02c2x0123, vi0x2, 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk02c2x0123, vi2x2, 1);
const float32x4_t vk12c2x0123 = vld1q_f32(w + 104);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk12c2x0123, vi1x2, 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk12c2x0123, vi3x2, 1);
const float32x4_t vk22c2x0123 = vld1q_f32(w + 108);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk22c2x0123, vi2x2, 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk22c2x0123, vi4x2, 1);
vi0x0 = vcombine_f32(vget_high_f32(vi0x1), vi0x2);
vi1x0 = vcombine_f32(vget_high_f32(vi1x1), vi1x2);
vi2x0 = vcombine_f32(vget_high_f32(vi2x1), vi2x2);
vi3x0 = vcombine_f32(vget_high_f32(vi3x1), vi3x2);
vi4x0 = vcombine_f32(vget_high_f32(vi4x1), vi4x2);
vo0c0123 = vmaxq_f32(vo0c0123, vmin);
vo1c0123 = vmaxq_f32(vo1c0123, vmin);
vo0c0123 = vminq_f32(vo0c0123, vmax);
vo1c0123 = vminq_f32(vo1c0123, vmax);
if XNN_LIKELY(c >= 4) {
vst1q_f32(o1, vo1c0123);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
vst1q_f32(o0, vo0c0123);
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
float* o1_tmp = o1;
float32x2_t vo0c01 = vget_low_f32(vo0c0123);
float32x2_t vo1c01 = vget_low_f32(vo1c0123);
if (c & 2) {
vst1_f32(o1_tmp, vo1c01); o1_tmp += 2;
vo1c01 = vget_high_f32(vo1c0123);
vst1_f32(o0_tmp, vo0c01); o0_tmp += 2;
vo0c01 = vget_high_f32(vo0c0123);
}
if (c & 1) {
vst1_lane_f32(o1_tmp, vo1c01, 0);
vst1_lane_f32(o0_tmp, vo0c01, 0);
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
}
}
assert(iw < 2);
if XNN_UNLIKELY(iw & 1) {
float32x4_t vo0c0123 = vld1q_f32(w);
float32x4_t vo1c0123 = vo0c0123;
const float32x4_t vk00c0x0123 = vld1q_f32(w + 4);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c0x0123, vget_low_f32(vi0x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c0x0123, vget_low_f32(vi2x0), 1);
const float32x4_t vk10c0x0123 = vld1q_f32(w + 8);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c0x0123, vget_low_f32(vi1x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c0x0123, vget_low_f32(vi3x0), 1);
const float32x4_t vk20c0x0123 = vld1q_f32(w + 12);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c0x0123, vget_low_f32(vi2x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c0x0123, vget_low_f32(vi4x0), 1);
const float32x4_t vk00c1x0123 = vld1q_f32(w + 16);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c1x0123, vget_high_f32(vi0x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c1x0123, vget_high_f32(vi2x0), 0);
const float32x4_t vk10c1x0123 = vld1q_f32(w + 20);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c1x0123, vget_high_f32(vi1x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c1x0123, vget_high_f32(vi3x0), 0);
const float32x4_t vk20c1x0123 = vld1q_f32(w + 24);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c1x0123, vget_high_f32(vi2x0), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c1x0123, vget_high_f32(vi4x0), 0);
const float32x4_t vk00c2x0123 = vld1q_f32(w + 28);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk00c2x0123, vget_high_f32(vi0x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk00c2x0123, vget_high_f32(vi2x0), 1);
const float32x4_t vk10c2x0123 = vld1q_f32(w + 32);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk10c2x0123, vget_high_f32(vi1x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk10c2x0123, vget_high_f32(vi3x0), 1);
const float32x4_t vk20c2x0123 = vld1q_f32(w + 36);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk20c2x0123, vget_high_f32(vi2x0), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk20c2x0123, vget_high_f32(vi4x0), 1);
// viMx1 = ( iM2c0, iM1c2, iM1c1, iM1c0 )
const float32x4_t vi0x1 = vld1q_f32(i0); i0 += 3;
const float32x4_t vi1x1 = vld1q_f32(i1); i1 += 3;
const float32x4_t vi2x1 = vld1q_f32(i2); i2 += 3;
const float32x4_t vi3x1 = vld1q_f32(i3); i3 += 3;
const float32x4_t vi4x1 = vld1q_f32(i4); i4 += 3;
const float32x4_t vk01c0x0123 = vld1q_f32(w + 40);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c0x0123, vget_low_f32(vi0x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c0x0123, vget_low_f32(vi2x1), 0);
const float32x4_t vk11c0x0123 = vld1q_f32(w + 44);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c0x0123, vget_low_f32(vi1x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c0x0123, vget_low_f32(vi3x1), 0);
const float32x4_t vk21c0x0123 = vld1q_f32(w + 48);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c0x0123, vget_low_f32(vi2x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c0x0123, vget_low_f32(vi4x1), 0);
const float32x4_t vk01c1x0123 = vld1q_f32(w + 52);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c1x0123, vget_low_f32(vi0x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c1x0123, vget_low_f32(vi2x1), 1);
const float32x4_t vk11c1x0123 = vld1q_f32(w + 56);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c1x0123, vget_low_f32(vi1x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c1x0123, vget_low_f32(vi3x1), 1);
const float32x4_t vk21c1x0123 = vld1q_f32(w + 60);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c1x0123, vget_low_f32(vi2x1), 1);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c1x0123, vget_low_f32(vi4x1), 1);
const float32x4_t vk01c2x0123 = vld1q_f32(w + 64);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk01c2x0123, vget_high_f32(vi0x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk01c2x0123, vget_high_f32(vi2x1), 0);
const float32x4_t vk11c2x0123 = vld1q_f32(w + 68);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk11c2x0123, vget_high_f32(vi1x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk11c2x0123, vget_high_f32(vi3x1), 0);
const float32x4_t vk21c2x0123 = vld1q_f32(w + 72);
vo0c0123 = vmlaq_lane_f32(vo0c0123, vk21c2x0123, vget_high_f32(vi2x1), 0);
vo1c0123 = vmlaq_lane_f32(vo1c0123, vk21c2x0123, vget_high_f32(vi4x1), 0);
vo0c0123 = vmaxq_f32(vo0c0123, vmin);
vo1c0123 = vmaxq_f32(vo1c0123, vmin);
vo0c0123 = vminq_f32(vo0c0123, vmax);
vo1c0123 = vminq_f32(vo1c0123, vmax);
if XNN_LIKELY(c >= 4) {
vst1q_f32(o1, vo1c0123);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
vst1q_f32(o0, vo0c0123);
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
} else {
float* o0_tmp = o0;
float* o1_tmp = o1;
float32x2_t vo0c01 = vget_low_f32(vo0c0123);
float32x2_t vo1c01 = vget_low_f32(vo1c0123);
if (c & 2) {
vst1_f32(o1_tmp, vo1c01); o1_tmp += 2;
vo1c01 = vget_high_f32(vo1c0123);
vst1_f32(o0_tmp, vo0c01); o0_tmp += 2;
vo0c01 = vget_high_f32(vo0c0123);
}
if (c & 1) {
vst1_lane_f32(o1_tmp, vo1c01, 0);
vst1_lane_f32(o0_tmp, vo0c01, 0);
}
o0 = (float*) ((uintptr_t) o0 + output_width_stride);
o1 = (float*) ((uintptr_t) o1 + output_width_stride);
}
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels
o0 = (float*) ((uintptr_t) o0 - output_channel_decrement);
o1 = (float*) ((uintptr_t) o1 - output_channel_decrement);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
i3 = (const float*) ((uintptr_t) i3 - input_width_decrement);
i4 = (const float*) ((uintptr_t) i4 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
w += 112;
c = doz(c, 4);
} while (c != 0);
// Move output pointers back to the position of the first channel, and forward to the next block of rows
o0 = (float*) ((uintptr_t) o0 + output_height_increment);
o1 = (float*) ((uintptr_t) o1 + output_height_increment);
    // Advance input pointers by four rows, to the windows of the next two output rows
i0 = i4;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
i3 = (const float*) ((uintptr_t) i2 + input_height_stride);
i4 = (const float*) ((uintptr_t) i3 + input_height_stride);
}
}
| 18,518 | 40.803612 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-conv-hwc2chw/f32-conv-hwc2chw-3x3s2p1c3x4-scalar-1x1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/conv.h>
#include <xnnpack/math.h>
void xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_channel_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
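  // For an odd input_width, the remainder column below does not advance the
  // input pointers, so only the even part of the row needs to be rewound.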
const size_t input_width_decrement = round_down_po2(input_width, 2) * 3 /* channels */ * sizeof(float);
const size_t output_width = (input_width + 1) / 2;
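  // After a row of output_width pixels, each channel pointer jumps ahead by
  // 4 channel planes, minus the row width it has already traversed.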
const size_t output_channel_increment = output_channel_stride * 4 - output_width * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input + input_height_stride * (output_y_start * 2 - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
const float voutput_max = params->scalar.max;
const float voutput_min = params->scalar.min;
for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 1) {
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
const float* w = weights;
size_t c = output_channels;
float* o0c0 = output0;
float* o0c1 = (float*) ((uintptr_t) o0c0 + output_channel_stride);
float* o0c2 = (float*) ((uintptr_t) o0c1 + output_channel_stride);
float* o0c3 = (float*) ((uintptr_t) o0c2 + output_channel_stride);
do {
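      // For a final group with c < 4 channels, alias the unused channel pointers
      // downward; their stores land on valid rows and are overwritten by the
      // later stores of the real channels.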
if XNN_UNPREDICTABLE(c < 2) {
o0c1 = o0c0;
}
if XNN_UNPREDICTABLE(c <= 2) {
o0c2 = o0c1;
}
if XNN_UNPREDICTABLE(c < 4) {
o0c3 = o0c2;
}
// Left edge padding
float vi00c0 = 0.0f;
float vi00c1 = 0.0f;
float vi00c2 = 0.0f;
float vi10c0 = 0.0f;
float vi10c1 = 0.0f;
float vi10c2 = 0.0f;
float vi20c0 = 0.0f;
float vi20c1 = 0.0f;
float vi20c2 = 0.0f;
size_t iw = input_width;
for (; iw >= 2; iw -= 2) {
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[0];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[0];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[0];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[1];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[1];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[1];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[2];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[2];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[2];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
const float vk02c0x0 = w[76];
const float vk02c0x1 = w[77];
const float vk02c0x2 = w[78];
const float vk02c0x3 = w[79];
const float vi02c0 = i0[3];
voc0 += vk02c0x0 * vi02c0;
voc1 += vk02c0x1 * vi02c0;
voc2 += vk02c0x2 * vi02c0;
voc3 += vk02c0x3 * vi02c0;
const float vk12c0x0 = w[80];
const float vk12c0x1 = w[81];
const float vk12c0x2 = w[82];
const float vk12c0x3 = w[83];
const float vi12c0 = i1[3];
voc0 += vk12c0x0 * vi12c0;
voc1 += vk12c0x1 * vi12c0;
voc2 += vk12c0x2 * vi12c0;
voc3 += vk12c0x3 * vi12c0;
const float vk22c0x0 = w[84];
const float vk22c0x1 = w[85];
const float vk22c0x2 = w[86];
const float vk22c0x3 = w[87];
const float vi22c0 = i2[3];
voc0 += vk22c0x0 * vi22c0;
voc1 += vk22c0x1 * vi22c0;
voc2 += vk22c0x2 * vi22c0;
voc3 += vk22c0x3 * vi22c0;
vi00c0 = vi02c0;
vi10c0 = vi12c0;
vi20c0 = vi22c0;
const float vk02c1x0 = w[88];
const float vk02c1x1 = w[89];
const float vk02c1x2 = w[90];
const float vk02c1x3 = w[91];
const float vi02c1 = i0[4];
voc0 += vk02c1x0 * vi02c1;
voc1 += vk02c1x1 * vi02c1;
voc2 += vk02c1x2 * vi02c1;
voc3 += vk02c1x3 * vi02c1;
const float vk12c1x0 = w[92];
const float vk12c1x1 = w[93];
const float vk12c1x2 = w[94];
const float vk12c1x3 = w[95];
const float vi12c1 = i1[4];
voc0 += vk12c1x0 * vi12c1;
voc1 += vk12c1x1 * vi12c1;
voc2 += vk12c1x2 * vi12c1;
voc3 += vk12c1x3 * vi12c1;
const float vk22c1x0 = w[96];
const float vk22c1x1 = w[97];
const float vk22c1x2 = w[98];
const float vk22c1x3 = w[99];
const float vi22c1 = i2[4];
voc0 += vk22c1x0 * vi22c1;
voc1 += vk22c1x1 * vi22c1;
voc2 += vk22c1x2 * vi22c1;
voc3 += vk22c1x3 * vi22c1;
vi00c1 = vi02c1;
vi10c1 = vi12c1;
vi20c1 = vi22c1;
const float vk02c2x0 = w[100];
const float vk02c2x1 = w[101];
const float vk02c2x2 = w[102];
const float vk02c2x3 = w[103];
const float vi02c2 = i0[5];
voc0 += vk02c2x0 * vi02c2;
voc1 += vk02c2x1 * vi02c2;
voc2 += vk02c2x2 * vi02c2;
voc3 += vk02c2x3 * vi02c2;
const float vk12c2x0 = w[104];
const float vk12c2x1 = w[105];
const float vk12c2x2 = w[106];
const float vk12c2x3 = w[107];
const float vi12c2 = i1[5];
voc0 += vk12c2x0 * vi12c2;
voc1 += vk12c2x1 * vi12c2;
voc2 += vk12c2x2 * vi12c2;
voc3 += vk12c2x3 * vi12c2;
const float vk22c2x0 = w[108];
const float vk22c2x1 = w[109];
const float vk22c2x2 = w[110];
const float vk22c2x3 = w[111];
const float vi22c2 = i2[5];
voc0 += vk22c2x0 * vi22c2;
voc1 += vk22c2x1 * vi22c2;
voc2 += vk22c2x2 * vi22c2;
voc3 += vk22c2x3 * vi22c2;
vi00c2 = vi02c2;
vi10c2 = vi12c2;
vi20c2 = vi22c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
        // Store in descending channel order: when the tail pointers alias for
        // c < 4, the valid (lower) channel is written last and wins.
        *o0c3++ = voc3;
        *o0c2++ = voc2;
        *o0c1++ = voc1;
        *o0c0++ = voc0;
i0 += 6;
i1 += 6;
i2 += 6;
}
assert(iw < 2);
if XNN_UNLIKELY(iw != 0) {
float voc0 = w[0];
float voc1 = w[1];
float voc2 = w[2];
float voc3 = w[3];
const float vk00c0x0 = w[4];
const float vk00c0x1 = w[5];
const float vk00c0x2 = w[6];
const float vk00c0x3 = w[7];
voc0 += vk00c0x0 * vi00c0;
voc1 += vk00c0x1 * vi00c0;
voc2 += vk00c0x2 * vi00c0;
voc3 += vk00c0x3 * vi00c0;
const float vk10c0x0 = w[8];
const float vk10c0x1 = w[9];
const float vk10c0x2 = w[10];
const float vk10c0x3 = w[11];
voc0 += vk10c0x0 * vi10c0;
voc1 += vk10c0x1 * vi10c0;
voc2 += vk10c0x2 * vi10c0;
voc3 += vk10c0x3 * vi10c0;
const float vk20c0x0 = w[12];
const float vk20c0x1 = w[13];
const float vk20c0x2 = w[14];
const float vk20c0x3 = w[15];
voc0 += vk20c0x0 * vi20c0;
voc1 += vk20c0x1 * vi20c0;
voc2 += vk20c0x2 * vi20c0;
voc3 += vk20c0x3 * vi20c0;
const float vk00c1x0 = w[16];
const float vk00c1x1 = w[17];
const float vk00c1x2 = w[18];
const float vk00c1x3 = w[19];
voc0 += vk00c1x0 * vi00c1;
voc1 += vk00c1x1 * vi00c1;
voc2 += vk00c1x2 * vi00c1;
voc3 += vk00c1x3 * vi00c1;
const float vk10c1x0 = w[20];
const float vk10c1x1 = w[21];
const float vk10c1x2 = w[22];
const float vk10c1x3 = w[23];
voc0 += vk10c1x0 * vi10c1;
voc1 += vk10c1x1 * vi10c1;
voc2 += vk10c1x2 * vi10c1;
voc3 += vk10c1x3 * vi10c1;
const float vk20c1x0 = w[24];
const float vk20c1x1 = w[25];
const float vk20c1x2 = w[26];
const float vk20c1x3 = w[27];
voc0 += vk20c1x0 * vi20c1;
voc1 += vk20c1x1 * vi20c1;
voc2 += vk20c1x2 * vi20c1;
voc3 += vk20c1x3 * vi20c1;
const float vk00c2x0 = w[28];
const float vk00c2x1 = w[29];
const float vk00c2x2 = w[30];
const float vk00c2x3 = w[31];
voc0 += vk00c2x0 * vi00c2;
voc1 += vk00c2x1 * vi00c2;
voc2 += vk00c2x2 * vi00c2;
voc3 += vk00c2x3 * vi00c2;
const float vk10c2x0 = w[32];
const float vk10c2x1 = w[33];
const float vk10c2x2 = w[34];
const float vk10c2x3 = w[35];
voc0 += vk10c2x0 * vi10c2;
voc1 += vk10c2x1 * vi10c2;
voc2 += vk10c2x2 * vi10c2;
voc3 += vk10c2x3 * vi10c2;
const float vk20c2x0 = w[36];
const float vk20c2x1 = w[37];
const float vk20c2x2 = w[38];
const float vk20c2x3 = w[39];
voc0 += vk20c2x0 * vi20c2;
voc1 += vk20c2x1 * vi20c2;
voc2 += vk20c2x2 * vi20c2;
voc3 += vk20c2x3 * vi20c2;
const float vk01c0x0 = w[40];
const float vk01c0x1 = w[41];
const float vk01c0x2 = w[42];
const float vk01c0x3 = w[43];
const float vi01c0 = i0[0];
voc0 += vk01c0x0 * vi01c0;
voc1 += vk01c0x1 * vi01c0;
voc2 += vk01c0x2 * vi01c0;
voc3 += vk01c0x3 * vi01c0;
const float vk11c0x0 = w[44];
const float vk11c0x1 = w[45];
const float vk11c0x2 = w[46];
const float vk11c0x3 = w[47];
const float vi11c0 = i1[0];
voc0 += vk11c0x0 * vi11c0;
voc1 += vk11c0x1 * vi11c0;
voc2 += vk11c0x2 * vi11c0;
voc3 += vk11c0x3 * vi11c0;
const float vk21c0x0 = w[48];
const float vk21c0x1 = w[49];
const float vk21c0x2 = w[50];
const float vk21c0x3 = w[51];
const float vi21c0 = i2[0];
voc0 += vk21c0x0 * vi21c0;
voc1 += vk21c0x1 * vi21c0;
voc2 += vk21c0x2 * vi21c0;
voc3 += vk21c0x3 * vi21c0;
const float vk01c1x0 = w[52];
const float vk01c1x1 = w[53];
const float vk01c1x2 = w[54];
const float vk01c1x3 = w[55];
const float vi01c1 = i0[1];
voc0 += vk01c1x0 * vi01c1;
voc1 += vk01c1x1 * vi01c1;
voc2 += vk01c1x2 * vi01c1;
voc3 += vk01c1x3 * vi01c1;
const float vk11c1x0 = w[56];
const float vk11c1x1 = w[57];
const float vk11c1x2 = w[58];
const float vk11c1x3 = w[59];
const float vi11c1 = i1[1];
voc0 += vk11c1x0 * vi11c1;
voc1 += vk11c1x1 * vi11c1;
voc2 += vk11c1x2 * vi11c1;
voc3 += vk11c1x3 * vi11c1;
const float vk21c1x0 = w[60];
const float vk21c1x1 = w[61];
const float vk21c1x2 = w[62];
const float vk21c1x3 = w[63];
const float vi21c1 = i2[1];
voc0 += vk21c1x0 * vi21c1;
voc1 += vk21c1x1 * vi21c1;
voc2 += vk21c1x2 * vi21c1;
voc3 += vk21c1x3 * vi21c1;
const float vk01c2x0 = w[64];
const float vk01c2x1 = w[65];
const float vk01c2x2 = w[66];
const float vk01c2x3 = w[67];
const float vi01c2 = i0[2];
voc0 += vk01c2x0 * vi01c2;
voc1 += vk01c2x1 * vi01c2;
voc2 += vk01c2x2 * vi01c2;
voc3 += vk01c2x3 * vi01c2;
const float vk11c2x0 = w[68];
const float vk11c2x1 = w[69];
const float vk11c2x2 = w[70];
const float vk11c2x3 = w[71];
const float vi11c2 = i1[2];
voc0 += vk11c2x0 * vi11c2;
voc1 += vk11c2x1 * vi11c2;
voc2 += vk11c2x2 * vi11c2;
voc3 += vk11c2x3 * vi11c2;
const float vk21c2x0 = w[72];
const float vk21c2x1 = w[73];
const float vk21c2x2 = w[74];
const float vk21c2x3 = w[75];
const float vi21c2 = i2[2];
voc0 += vk21c2x0 * vi21c2;
voc1 += vk21c2x1 * vi21c2;
voc2 += vk21c2x2 * vi21c2;
voc3 += vk21c2x3 * vi21c2;
voc0 = math_min_f32(voc0, voutput_max);
voc1 = math_min_f32(voc1, voutput_max);
voc2 = math_min_f32(voc2, voutput_max);
voc3 = math_min_f32(voc3, voutput_max);
voc0 = math_max_f32(voc0, voutput_min);
voc1 = math_max_f32(voc1, voutput_min);
voc2 = math_max_f32(voc2, voutput_min);
voc3 = math_max_f32(voc3, voutput_min);
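        // Scatter one output pixel to each of the 4 channel planes (CHW layout).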
*o0c0++ = voc0;
*o0c1++ = voc1;
*o0c2++ = voc2;
*o0c3++ = voc3;
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels.
o0c0 = (float*) ((uintptr_t) o0c0 + output_channel_increment);
o0c1 = (float*) ((uintptr_t) o0c1 + output_channel_increment);
o0c2 = (float*) ((uintptr_t) o0c2 + output_channel_increment);
o0c3 = (float*) ((uintptr_t) o0c3 + output_channel_increment);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
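      // Each 112-float block: 4 biases, then 3x3x3 taps (kernel column, input channel, kernel row order), 4 outputs per tap.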
w += 112;
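      // doz() is saturating "difference or zero" subtraction, so c cannot underflow.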
c = doz(c, 4);
} while (c != 0);
// Move output pointers forward to the next row
output0 = (float*) ((uintptr_t) output0 + output_height_stride);
    // Move input pointers down 2 rows (convolution stride 2) for the next output row
i0 = i2;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
}
}
| 19,732 | 28.452239 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-conv-hwc2chw/f32-conv-hwc2chw-3x3s2p1c3x4-sse-1x1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/conv.h>
#include <xnnpack/math.h>

void xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_1x1(
size_t input_height,
size_t input_width,
size_t output_y_start,
size_t output_y_end,
const float* input,
const float* zero,
const float* weights,
float* output,
size_t input_padding_top,
size_t output_channels,
size_t output_height_stride,
size_t output_channel_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(input_width != 0);
assert(output_y_end > output_y_start);
assert(input_padding_top <= 1);
assert(output_channels != 0);
const size_t input_height_stride = input_width * 3 /* channels */ * sizeof(float);
const size_t input_width_decrement = round_down_po2(input_width, 2) * 3 /* channels */ * sizeof(float);
const size_t output_width = (input_width + 1) / 2;
const size_t output_channel_increment = output_channel_stride * 4 - output_width * sizeof(float);
// Adjustment for padding processed below
const float* i0 = (const float*) ((uintptr_t) input + input_height_stride * (output_y_start * 2 - input_padding_top));
const float* i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
float* output0 = (float*) ((uintptr_t) output + output_height_stride * output_y_start);
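  // Rows above the input (top padding) are read from the zero buffer.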
if XNN_UNPREDICTABLE(output_y_start < input_padding_top) {
i0 = zero;
}
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
for (size_t output_y = output_y_start; output_y < output_y_end; output_y += 1) {
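    // Rows past the bottom of the input are read from the zero buffer.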
const size_t input_y2 = output_y * 2 + 2 - input_padding_top;
if XNN_UNPREDICTABLE(input_y2 >= input_height) {
i2 = zero;
}
const float* w = weights;
size_t c = output_channels;
float* o0c0 = output0;
float* o0c1 = (float*) ((uintptr_t) o0c0 + output_channel_stride);
float* o0c2 = (float*) ((uintptr_t) o0c1 + output_channel_stride);
float* o0c3 = (float*) ((uintptr_t) o0c2 + output_channel_stride);
do {
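      // Fewer than 4 output channels left: alias the unused pointers so all 4 stores below stay within allocated rows.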
if XNN_UNPREDICTABLE(c < 2) {
o0c1 = o0c0;
}
if XNN_UNPREDICTABLE(c <= 2) {
o0c2 = o0c1;
}
if XNN_UNPREDICTABLE(c < 4) {
o0c3 = o0c2;
}
// Left edge padding
__m128 vi00c0 = _mm_setzero_ps();
__m128 vi00c1 = _mm_setzero_ps();
__m128 vi00c2 = _mm_setzero_ps();
__m128 vi10c0 = _mm_setzero_ps();
__m128 vi10c1 = _mm_setzero_ps();
__m128 vi10c2 = _mm_setzero_ps();
__m128 vi20c0 = _mm_setzero_ps();
__m128 vi20c1 = _mm_setzero_ps();
__m128 vi20c2 = _mm_setzero_ps();
size_t iw = input_width;
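      // Main loop: each iteration consumes 2 input pixels (stride 2) and emits 1 output pixel per channel.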
for (; iw >= 2; iw -= 2) {
__m128 voc0123 = _mm_loadu_ps(w);
const __m128 vk00c0x0123 = _mm_load_ps(w + 4);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c0x0123, vi00c0));
const __m128 vk10c0x0123 = _mm_load_ps(w + 8);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c0x0123, vi10c0));
const __m128 vk20c0x0123 = _mm_load_ps(w + 12);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c0x0123, vi20c0));
const __m128 vk00c1x0123 = _mm_load_ps(w + 16);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c1x0123, vi00c1));
const __m128 vk10c1x0123 = _mm_load_ps(w + 20);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c1x0123, vi10c1));
const __m128 vk20c1x0123 = _mm_load_ps(w + 24);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c1x0123, vi20c1));
const __m128 vk00c2x0123 = _mm_load_ps(w + 28);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c2x0123, vi00c2));
const __m128 vk10c2x0123 = _mm_load_ps(w + 32);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c2x0123, vi10c2));
const __m128 vk20c2x0123 = _mm_load_ps(w + 36);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c2x0123, vi20c2));
const __m128 vk01c0x0123 = _mm_load_ps(w + 40);
const __m128 vi01c0 = _mm_load1_ps(i0);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c0x0123, vi01c0));
const __m128 vk11c0x0123 = _mm_load_ps(w + 44);
const __m128 vi11c0 = _mm_load1_ps(i1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c0x0123, vi11c0));
const __m128 vk21c0x0123 = _mm_load_ps(w + 48);
const __m128 vi21c0 = _mm_load1_ps(i2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c0x0123, vi21c0));
const __m128 vk01c1x0123 = _mm_load_ps(w + 52);
const __m128 vi01c1 = _mm_load1_ps(i0 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c1x0123, vi01c1));
const __m128 vk11c1x0123 = _mm_load_ps(w + 56);
const __m128 vi11c1 = _mm_load1_ps(i1 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c1x0123, vi11c1));
const __m128 vk21c1x0123 = _mm_load_ps(w + 60);
const __m128 vi21c1 = _mm_load1_ps(i2 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c1x0123, vi21c1));
const __m128 vk01c2x0123 = _mm_load_ps(w + 64);
const __m128 vi01c2 = _mm_load1_ps(i0 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c2x0123, vi01c2));
const __m128 vk11c2x0123 = _mm_load_ps(w + 68);
const __m128 vi11c2 = _mm_load1_ps(i1 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c2x0123, vi11c2));
const __m128 vk21c2x0123 = _mm_load_ps(w + 72);
const __m128 vi21c2 = _mm_load1_ps(i2 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c2x0123, vi21c2));
const __m128 vk02c0x0123 = _mm_load_ps(w + 76);
const __m128 vi02c0 = _mm_load1_ps(i0 + 3);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk02c0x0123, vi02c0));
const __m128 vk12c0x0123 = _mm_load_ps(w + 80);
const __m128 vi12c0 = _mm_load1_ps(i1 + 3);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk12c0x0123, vi12c0));
const __m128 vk22c0x0123 = _mm_load_ps(w + 84);
const __m128 vi22c0 = _mm_load1_ps(i2 + 3);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk22c0x0123, vi22c0));
vi00c0 = vi02c0;
vi10c0 = vi12c0;
vi20c0 = vi22c0;
const __m128 vk02c1x0123 = _mm_load_ps(w + 88);
const __m128 vi02c1 = _mm_load1_ps(i0 + 4);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk02c1x0123, vi02c1));
const __m128 vk12c1x0123 = _mm_load_ps(w + 92);
const __m128 vi12c1 = _mm_load1_ps(i1 + 4);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk12c1x0123, vi12c1));
const __m128 vk22c1x0123 = _mm_load_ps(w + 96);
const __m128 vi22c1 = _mm_load1_ps(i2 + 4);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk22c1x0123, vi22c1));
vi00c1 = vi02c1;
vi10c1 = vi12c1;
vi20c1 = vi22c1;
const __m128 vk02c2x0123 = _mm_load_ps(w + 100);
const __m128 vi02c2 = _mm_load1_ps(i0 + 5);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk02c2x0123, vi02c2));
const __m128 vk12c2x0123 = _mm_load_ps(w + 104);
const __m128 vi12c2 = _mm_load1_ps(i1 + 5);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk12c2x0123, vi12c2));
const __m128 vk22c2x0123 = _mm_load_ps(w + 108);
const __m128 vi22c2 = _mm_load1_ps(i2 + 5);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk22c2x0123, vi22c2));
vi00c2 = vi02c2;
vi10c2 = vi12c2;
vi20c2 = vi22c2;
voc0123 = _mm_min_ps(voc0123, vmax);
voc0123 = _mm_max_ps(voc0123, vmin);
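        // Scatter the 4 output-channel lanes to their separate channel rows (CHW layout).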
_mm_store_ss(o0c0, voc0123); o0c0++;
_mm_store_ss(o0c1, _mm_shuffle_ps(voc0123, voc0123, 1)); o0c1++;
_mm_store_ss(o0c2, _mm_shuffle_ps(voc0123, voc0123, 2)); o0c2++;
_mm_store_ss(o0c3, _mm_shuffle_ps(voc0123, voc0123, 3)); o0c3++;
i0 += 6;
i1 += 6;
i2 += 6;
}
assert(iw < 2);
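      // Odd input width: the last output pixel uses only kernel columns 0 and 1; column 2 falls in the right padding.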
if XNN_UNLIKELY(iw != 0) {
__m128 voc0123 = _mm_load_ps(w);
const __m128 vk00c0x0123 = _mm_load_ps(w + 4);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c0x0123, vi00c0));
const __m128 vk10c0x0123 = _mm_load_ps(w + 8);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c0x0123, vi10c0));
const __m128 vk20c0x0123 = _mm_load_ps(w + 12);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c0x0123, vi20c0));
const __m128 vk00c1x0123 = _mm_load_ps(w + 16);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c1x0123, vi00c1));
const __m128 vk10c1x0123 = _mm_load_ps(w + 20);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c1x0123, vi10c1));
const __m128 vk20c1x0123 = _mm_load_ps(w + 24);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c1x0123, vi20c1));
const __m128 vk00c2x0123 = _mm_load_ps(w + 28);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk00c2x0123, vi00c2));
const __m128 vk10c2x0123 = _mm_load_ps(w + 32);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk10c2x0123, vi10c2));
const __m128 vk20c2x0123 = _mm_load_ps(w + 36);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk20c2x0123, vi20c2));
const __m128 vk01c0x0123 = _mm_load_ps(w + 40);
const __m128 vi01c0 = _mm_load1_ps(i0);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c0x0123, vi01c0));
const __m128 vk11c0x0123 = _mm_load_ps(w + 44);
const __m128 vi11c0 = _mm_load1_ps(i1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c0x0123, vi11c0));
const __m128 vk21c0x0123 = _mm_load_ps(w + 48);
const __m128 vi21c0 = _mm_load1_ps(i2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c0x0123, vi21c0));
const __m128 vk01c1x0123 = _mm_load_ps(w + 52);
const __m128 vi01c1 = _mm_load1_ps(i0 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c1x0123, vi01c1));
const __m128 vk11c1x0123 = _mm_load_ps(w + 56);
const __m128 vi11c1 = _mm_load1_ps(i1 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c1x0123, vi11c1));
const __m128 vk21c1x0123 = _mm_load_ps(w + 60);
const __m128 vi21c1 = _mm_load1_ps(i2 + 1);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c1x0123, vi21c1));
const __m128 vk01c2x0123 = _mm_load_ps(w + 64);
const __m128 vi01c2 = _mm_load1_ps(i0 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk01c2x0123, vi01c2));
const __m128 vk11c2x0123 = _mm_load_ps(w + 68);
const __m128 vi11c2 = _mm_load1_ps(i1 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk11c2x0123, vi11c2));
const __m128 vk21c2x0123 = _mm_load_ps(w + 72);
const __m128 vi21c2 = _mm_load1_ps(i2 + 2);
voc0123 = _mm_add_ps(voc0123, _mm_mul_ps(vk21c2x0123, vi21c2));
voc0123 = _mm_min_ps(voc0123, vmax);
voc0123 = _mm_max_ps(voc0123, vmin);
_mm_store_ss(o0c0, voc0123); o0c0++;
_mm_store_ss(o0c1, _mm_shuffle_ps(voc0123, voc0123, 1)); o0c1++;
_mm_store_ss(o0c2, _mm_shuffle_ps(voc0123, voc0123, 2)); o0c2++;
_mm_store_ss(o0c3, _mm_shuffle_ps(voc0123, voc0123, 3)); o0c3++;
}
// Move output pointers back to the position of the first pixel in a row,
// and forward to the next block of output channels.
o0c0 = (float*) ((uintptr_t) o0c0 + output_channel_increment);
o0c1 = (float*) ((uintptr_t) o0c1 + output_channel_increment);
o0c2 = (float*) ((uintptr_t) o0c2 + output_channel_increment);
o0c3 = (float*) ((uintptr_t) o0c3 + output_channel_increment);
// Revert input pointers to the position of the first pixel in a row
i0 = (const float*) ((uintptr_t) i0 - input_width_decrement);
i1 = (const float*) ((uintptr_t) i1 - input_width_decrement);
i2 = (const float*) ((uintptr_t) i2 - input_width_decrement);
// Move to the block of weights for the next 4 output channels
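      // 112 floats per block: 4 biases + 3x3x3 kernel taps, 4 output channels each.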
w += 112;
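      // doz() saturates at zero, so the channel counter never wraps.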
c = doz(c, 4);
} while (c != 0);
// Move output pointers forward to the next row
output0 = (float*) ((uintptr_t) output0 + output_height_stride);
    // Move input pointers down 2 rows (convolution stride 2) for the next output row
i0 = i2;
i1 = (const float*) ((uintptr_t) i0 + input_height_stride);
i2 = (const float*) ((uintptr_t) i1 + input_height_stride);
}
}
| 12,460 | 39.196774 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p16c-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>

void xnn_f32_dwconv_minmax_ukernel_25p16c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
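    // Weight layout per 16-channel group (416 floats): w[0..15] biases,
    // then 25 taps of 16 channels each at w[16], w[32], ..., w[400].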
for (; c >= 16; c -= 16) {
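      // Taps alternate between accumulators p0 and p1 to shorten the FMA dependency chain.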
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi9x0123456789ABCDEF = _mm512_loadu_ps(i9);
i9 += 16;
const __m512 vk9x0123456789ABCDEF = _mm512_load_ps(w + 160);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi10x0123456789ABCDEF = _mm512_loadu_ps(i10);
i10 += 16;
const __m512 vk10x0123456789ABCDEF = _mm512_load_ps(w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi11x0123456789ABCDEF = _mm512_loadu_ps(i11);
i11 += 16;
const __m512 vk11x0123456789ABCDEF = _mm512_load_ps(w + 192);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi12x0123456789ABCDEF = _mm512_loadu_ps(i12);
i12 += 16;
const __m512 vk12x0123456789ABCDEF = _mm512_load_ps(w + 208);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi13x0123456789ABCDEF = _mm512_loadu_ps(i13);
i13 += 16;
const __m512 vk13x0123456789ABCDEF = _mm512_load_ps(w + 224);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi14x0123456789ABCDEF = _mm512_loadu_ps(i14);
i14 += 16;
const __m512 vk14x0123456789ABCDEF = _mm512_load_ps(w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi15x0123456789ABCDEF = _mm512_loadu_ps(i15);
i15 += 16;
const __m512 vk15x0123456789ABCDEF = _mm512_load_ps(w + 256);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi16x0123456789ABCDEF = _mm512_loadu_ps(i16);
i16 += 16;
const __m512 vk16x0123456789ABCDEF = _mm512_load_ps(w + 272);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi17x0123456789ABCDEF = _mm512_loadu_ps(i17);
i17 += 16;
const __m512 vk17x0123456789ABCDEF = _mm512_load_ps(w + 288);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi18x0123456789ABCDEF = _mm512_loadu_ps(i18);
i18 += 16;
const __m512 vk18x0123456789ABCDEF = _mm512_load_ps(w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi19x0123456789ABCDEF = _mm512_loadu_ps(i19);
i19 += 16;
const __m512 vk19x0123456789ABCDEF = _mm512_load_ps(w + 320);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi20x0123456789ABCDEF = _mm512_loadu_ps(i20);
i20 += 16;
const __m512 vk20x0123456789ABCDEF = _mm512_load_ps(w + 336);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi21x0123456789ABCDEF = _mm512_loadu_ps(i21);
i21 += 16;
const __m512 vk21x0123456789ABCDEF = _mm512_load_ps(w + 352);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi22x0123456789ABCDEF = _mm512_loadu_ps(i22);
i22 += 16;
const __m512 vk22x0123456789ABCDEF = _mm512_load_ps(w + 368);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi23x0123456789ABCDEF = _mm512_loadu_ps(i23);
i23 += 16;
const __m512 vk23x0123456789ABCDEF = _mm512_load_ps(w + 384);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi24x0123456789ABCDEF = _mm512_loadu_ps(i24);
i24 += 16;
const __m512 vk24x0123456789ABCDEF = _mm512_load_ps(w + 400);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 416;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for the c valid 32-bit elements (the low c bits are set).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i9);
const __m512 vk9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 160);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i10);
const __m512 vk10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i11);
const __m512 vk11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 192);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i12);
const __m512 vk12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 208);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i13);
const __m512 vk13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 224);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i14);
const __m512 vk14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i15);
const __m512 vk15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 256);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i16);
const __m512 vk16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 272);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i17);
const __m512 vk17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 288);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i18);
const __m512 vk18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i19);
const __m512 vk19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 320);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i20);
const __m512 vk20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 336);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i21);
const __m512 vk21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 352);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i22);
const __m512 vk22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 368);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i23);
const __m512 vk23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 384);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);
const __m512 vi24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i24);
const __m512 vk24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 400);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,017 | 43.385809 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p16c-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>

void xnn_f32_dwconv_minmax_ukernel_25p16c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
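    // Weights: 16 biases followed by 25 taps x 16 channels (416 floats per group).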
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
i4 += 16;
const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
i5 += 16;
const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
i6 += 16;
const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
i7 += 16;
const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
i8 += 16;
const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi9x0123456789ABCDEF = _mm512_loadu_ps(i9);
i9 += 16;
const __m512 vk9x0123456789ABCDEF = _mm512_load_ps(w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi10x0123456789ABCDEF = _mm512_loadu_ps(i10);
i10 += 16;
const __m512 vk10x0123456789ABCDEF = _mm512_load_ps(w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi11x0123456789ABCDEF = _mm512_loadu_ps(i11);
i11 += 16;
const __m512 vk11x0123456789ABCDEF = _mm512_load_ps(w + 192);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi12x0123456789ABCDEF = _mm512_loadu_ps(i12);
i12 += 16;
const __m512 vk12x0123456789ABCDEF = _mm512_load_ps(w + 208);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi13x0123456789ABCDEF = _mm512_loadu_ps(i13);
i13 += 16;
const __m512 vk13x0123456789ABCDEF = _mm512_load_ps(w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi14x0123456789ABCDEF = _mm512_loadu_ps(i14);
i14 += 16;
const __m512 vk14x0123456789ABCDEF = _mm512_load_ps(w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi15x0123456789ABCDEF = _mm512_loadu_ps(i15);
i15 += 16;
const __m512 vk15x0123456789ABCDEF = _mm512_load_ps(w + 256);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi16x0123456789ABCDEF = _mm512_loadu_ps(i16);
i16 += 16;
const __m512 vk16x0123456789ABCDEF = _mm512_load_ps(w + 272);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi17x0123456789ABCDEF = _mm512_loadu_ps(i17);
i17 += 16;
const __m512 vk17x0123456789ABCDEF = _mm512_load_ps(w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi18x0123456789ABCDEF = _mm512_loadu_ps(i18);
i18 += 16;
const __m512 vk18x0123456789ABCDEF = _mm512_load_ps(w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi19x0123456789ABCDEF = _mm512_loadu_ps(i19);
i19 += 16;
const __m512 vk19x0123456789ABCDEF = _mm512_load_ps(w + 320);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi20x0123456789ABCDEF = _mm512_loadu_ps(i20);
i20 += 16;
const __m512 vk20x0123456789ABCDEF = _mm512_load_ps(w + 336);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi21x0123456789ABCDEF = _mm512_loadu_ps(i21);
i21 += 16;
const __m512 vk21x0123456789ABCDEF = _mm512_load_ps(w + 352);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi22x0123456789ABCDEF = _mm512_loadu_ps(i22);
i22 += 16;
const __m512 vk22x0123456789ABCDEF = _mm512_load_ps(w + 368);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi23x0123456789ABCDEF = _mm512_loadu_ps(i23);
i23 += 16;
const __m512 vk23x0123456789ABCDEF = _mm512_load_ps(w + 384);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi24x0123456789ABCDEF = _mm512_loadu_ps(i24);
i24 += 16;
const __m512 vk24x0123456789ABCDEF = _mm512_load_ps(w + 400);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 416;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for the c valid 32-bit elements (the low c bits are set).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
const __m512 vk4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
const __m512 vk5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
const __m512 vk6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
const __m512 vk7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
const __m512 vk8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i9);
const __m512 vk9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 160);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i10);
const __m512 vk10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 176);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i11);
const __m512 vk11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 192);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i12);
const __m512 vk12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 208);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i13);
const __m512 vk13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 224);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i14);
const __m512 vk14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 240);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i15);
const __m512 vk15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 256);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i16);
const __m512 vk16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 272);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i17);
const __m512 vk17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 288);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i18);
const __m512 vk18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 304);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i19);
const __m512 vk19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 320);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i20);
const __m512 vk20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 336);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i21);
const __m512 vk21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 352);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i22);
const __m512 vk22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 368);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i23);
const __m512 vk23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 384);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i24);
const __m512 vk24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 400);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,744 | 43.17226 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-minmax-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv_minmax_ukernel_25p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
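    // Per-channel weights: 1 bias followed by 25 kernel taps (26 floats).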
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
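      // A second accumulator splits the 25-tap chain into two independent halves, merged before clamping.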
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
vacc0p0 += vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,308 | 28.888489 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv_minmax_ukernel_25p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
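
A hedged sketch of how the scalar minmax microkernel above might be driven for one output pixel. The argument meanings are inferred from the kernel body: channels counts float elements, input is an array of 25 tap-row pointers advanced by input_stride bytes per pixel, and padded taps alias the shared zero buffer. The buffer sizes, clamp bounds, and run_one_pixel helper are hypothetical, not an XNNPACK API:

#include <stddef.h>

#include <xnnpack/dwconv.h>  // same header the kernel file includes; assumed
                             // to declare the ukernel and its params union

#define CHANNELS 8
#define TAPS 25

static float packed_weights[CHANNELS * (TAPS + 1)];  // [bias, k0..k24] per channel
static float zero_buffer[CHANNELS];                  // shared all-zero row for padded taps
static const float* indirection[TAPS];               // the 25 tap-row pointers
static float output_row[CHANNELS];

static void run_one_pixel(const float* input_rows[TAPS]) {
  for (size_t t = 0; t < TAPS; t++) {
    // Padded taps point at zero_buffer; the kernel skips rebasing those.
    indirection[t] = input_rows[t] != NULL ? input_rows[t] : zero_buffer;
  }
  union xnn_f32_minmax_params params;
  params.scalar.min = 0.0f;  // field names match the params->scalar reads above
  params.scalar.max = 6.0f;
  xnn_f32_dwconv_minmax_ukernel_25p1c__scalar(
      /*channels=*/CHANNELS,
      /*output_width=*/1,
      indirection, packed_weights, output_row,
      /*input_stride=*/TAPS * sizeof(void*),  // bytes per pixel of indirection entries
      /*output_increment=*/0,
      /*input_offset=*/0,
      zero_buffer,
      &params);
}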
| 8,296 | 28.953069 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-minmax-wasm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p1c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
vacc0p0 += vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
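
The wasm variants in this set differ from the scalar kernels only in the clamp: they use the clang builtins __builtin_wasm_max_f32 and __builtin_wasm_min_f32, which lower to the WebAssembly f32.max and f32.min instructions instead of branchy compares. A small sketch of an equivalent clamp with a portable fallback; clamp_f32 is a hypothetical helper, not an XNNPACK API:

// Clamp v into [vmin, vmax], preferring single-instruction min/max on wasm.
static inline float clamp_f32(float v, float vmin, float vmax) {
#if defined(__wasm__)
  v = __builtin_wasm_max_f32(v, vmin);
  v = __builtin_wasm_min_f32(v, vmax);
#else
  v = v < vmin ? vmin : v;  // portable fallback; NaN behavior may differ
  v = v > vmax ? vmax : v;
#endif
  return v;
}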
| 8,326 | 28.953237 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p1c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,314 | 29.018051 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_25p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
vacc0p0 += vacc0p1;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
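
The w[0]..w[25] reads and the `w += 26` advance in the kernels above imply a per-channel weight layout of [bias, k0 .. k24], 26 consecutive floats. A minimal packing sketch under that assumption; pack_dwconv_25p1c is a hypothetical helper, and the tap-major layout of the source kernel tensor is an assumption of this example:

#include <stddef.h>

static void pack_dwconv_25p1c(
    size_t channels,
    const float* bias,    // [channels]
    const float* kernel,  // [25][channels], tap-major as assumed here
    float* packed)        // [channels * 26]
{
  for (size_t c = 0; c < channels; c++) {
    *packed++ = bias[c];                       // becomes w[0]
    for (size_t tap = 0; tap < 25; tap++) {
      *packed++ = kernel[tap * channels + c];  // becomes w[1 + tap]
    }
  }
}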
| 8,132 | 28.682482 | 76 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p1c-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_25p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[5];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[6];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[7];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[8];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[9];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[10];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[11];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[12];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[13];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[14];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[15];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[16];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[17];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[18];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[19];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[20];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[21];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[22];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[23];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[24];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[25];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
w += 26;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
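
Each entry of `input` above is one tap row of the current output pixel; rows that fall into padding point at the shared `zero` buffer and are deliberately left un-rebased by input_offset, which is exactly what the `if XNN_UNPREDICTABLE(iN != zero)` guards implement. A simplified, hypothetical sketch of building such an indirection buffer for one output pixel of a 5x5 depthwise convolution with padding 2 and stride 1 over NHWC input:

#include <stddef.h>

// Gather the 25 tap-row pointers for output pixel (oy, ox). Out-of-bounds
// taps use the shared zero buffer, matching the kernel's convention.
static void fill_indirection_5x5(
    const float** ind,   // [25]
    const float* input,  // [height][width][channels], NHWC
    const float* zero,   // [channels] of zeros
    size_t height, size_t width, size_t channels,
    size_t oy, size_t ox)
{
  for (size_t ky = 0; ky < 5; ky++) {
    for (size_t kx = 0; kx < 5; kx++) {
      const ptrdiff_t iy = (ptrdiff_t) (oy + ky) - 2;  // padding = 2
      const ptrdiff_t ix = (ptrdiff_t) (ox + kx) - 2;
      if (iy < 0 || iy >= (ptrdiff_t) height || ix < 0 || ix >= (ptrdiff_t) width) {
        ind[ky * 5 + kx] = zero;  // padded tap: never rebased by the kernel
      } else {
        ind[ky * 5 + kx] = &input[((size_t) iy * width + (size_t) ix) * channels];
      }
    }
  }
}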
| 8,120 | 28.747253 | 76 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-minmax-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p1 = math_muladd_f32(vi9x0, vk9x0, vacc0p1);
const float vk9x1 = w[21];
vacc1p1 = math_muladd_f32(vi9x1, vk9x1, vacc1p1);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p1 = math_muladd_f32(vi11x0, vk11x0, vacc0p1);
const float vk11x1 = w[25];
vacc1p1 = math_muladd_f32(vi11x1, vk11x1, vacc1p1);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p1 = math_muladd_f32(vi13x0, vk13x0, vacc0p1);
const float vk13x1 = w[29];
vacc1p1 = math_muladd_f32(vi13x1, vk13x1, vacc1p1);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p1 = math_muladd_f32(vi15x0, vk15x0, vacc0p1);
const float vk15x1 = w[33];
vacc1p1 = math_muladd_f32(vi15x1, vk15x1, vacc1p1);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p1 = math_muladd_f32(vi17x0, vk17x0, vacc0p1);
const float vk17x1 = w[37];
vacc1p1 = math_muladd_f32(vi17x1, vk17x1, vacc1p1);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p1 = math_muladd_f32(vi19x0, vk19x0, vacc0p1);
const float vk19x1 = w[41];
vacc1p1 = math_muladd_f32(vi19x1, vk19x1, vacc1p1);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p1 = math_muladd_f32(vi21x0, vk21x0, vacc0p1);
const float vk21x1 = w[45];
vacc1p1 = math_muladd_f32(vi21x1, vk21x1, vacc1p1);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p1 = math_muladd_f32(vi23x0, vk23x0, vacc0p1);
const float vk23x1 = w[49];
vacc1p1 = math_muladd_f32(vi23x1, vk23x1, vacc1p1);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
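
The 2-channel kernel above reads w[0]..w[51] and advances `w += 52`, implying weights interleaved per channel pair: [bias[c], bias[c+1], k0[c], k0[c+1], ..., k24[c], k24[c+1]]. A packing sketch under that assumption; pack_dwconv_25p2c is hypothetical, and a real packer would round the channel count up to a multiple of 2 and zero-pad the final group so the single-channel remainder loop still reads valid memory:

#include <stddef.h>

static void pack_dwconv_25p2c(
    size_t channel_pairs,  // channels / 2; odd-channel padding omitted here
    size_t channels,
    const float* bias,     // [channels]
    const float* kernel,   // [25][channels], tap-major as assumed here
    float* packed)         // [channel_pairs * 52]
{
  for (size_t p = 0; p < channel_pairs; p++) {
    const size_t c = 2 * p;
    *packed++ = bias[c];      // w[0]
    *packed++ = bias[c + 1];  // w[1]
    for (size_t tap = 0; tap < 25; tap++) {
      *packed++ = kernel[tap * channels + c];      // w[2 + 2*tap]
      *packed++ = kernel[tap * channels + c + 1];  // w[3 + 2*tap]
    }
  }
}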
| 15,448 | 29.95992 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p0 = math_muladd_f32(vi9x0, vk9x0, vacc0p0);
const float vk9x1 = w[21];
vacc1p0 = math_muladd_f32(vi9x1, vk9x1, vacc1p0);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p0 = math_muladd_f32(vi11x0, vk11x0, vacc0p0);
const float vk11x1 = w[25];
vacc1p0 = math_muladd_f32(vi11x1, vk11x1, vacc1p0);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p0 = math_muladd_f32(vi13x0, vk13x0, vacc0p0);
const float vk13x1 = w[29];
vacc1p0 = math_muladd_f32(vi13x1, vk13x1, vacc1p0);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p0 = math_muladd_f32(vi15x0, vk15x0, vacc0p0);
const float vk15x1 = w[33];
vacc1p0 = math_muladd_f32(vi15x1, vk15x1, vacc1p0);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p0 = math_muladd_f32(vi17x0, vk17x0, vacc0p0);
const float vk17x1 = w[37];
vacc1p0 = math_muladd_f32(vi17x1, vk17x1, vacc1p0);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p0 = math_muladd_f32(vi19x0, vk19x0, vacc0p0);
const float vk19x1 = w[41];
vacc1p0 = math_muladd_f32(vi19x1, vk19x1, vacc1p0);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p0 = math_muladd_f32(vi21x0, vk21x0, vacc0p0);
const float vk21x1 = w[45];
vacc1p0 = math_muladd_f32(vi21x1, vk21x1, vacc1p0);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p0 = math_muladd_f32(vi23x0, vk23x0, vacc0p0);
const float vk23x1 = w[49];
vacc1p0 = math_muladd_f32(vi23x1, vk23x1, vacc1p0);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
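      // Clamp the accumulated results to the output range [vmin, vmax].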
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
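    // Remainder: process the last channel when the channel count is odd.
    // The bias is consumed via *w++, so the per-tap weights for this channel
    // remain interleaved at odd offsets w[1], w[3], ..., w[49].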
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,305 | 29.983806 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-minmax-wasm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p2c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
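    // Advance to the next output pixel's set of 25 input row pointers.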
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
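      // acc2 variant: start a second, independent accumulator chain
      // (vacc*p1) to avoid one long floating-point dependency chain.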
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p1 = math_muladd_f32(vi9x0, vk9x0, vacc0p1);
const float vk9x1 = w[21];
vacc1p1 = math_muladd_f32(vi9x1, vk9x1, vacc1p1);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p1 = math_muladd_f32(vi11x0, vk11x0, vacc0p1);
const float vk11x1 = w[25];
vacc1p1 = math_muladd_f32(vi11x1, vk11x1, vacc1p1);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p1 = math_muladd_f32(vi13x0, vk13x0, vacc0p1);
const float vk13x1 = w[29];
vacc1p1 = math_muladd_f32(vi13x1, vk13x1, vacc1p1);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p1 = math_muladd_f32(vi15x0, vk15x0, vacc0p1);
const float vk15x1 = w[33];
vacc1p1 = math_muladd_f32(vi15x1, vk15x1, vacc1p1);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p1 = math_muladd_f32(vi17x0, vk17x0, vacc0p1);
const float vk17x1 = w[37];
vacc1p1 = math_muladd_f32(vi17x1, vk17x1, vacc1p1);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p1 = math_muladd_f32(vi19x0, vk19x0, vacc0p1);
const float vk19x1 = w[41];
vacc1p1 = math_muladd_f32(vi19x1, vk19x1, vacc1p1);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p1 = math_muladd_f32(vi21x0, vk21x0, vacc0p1);
const float vk21x1 = w[45];
vacc1p1 = math_muladd_f32(vi21x1, vk21x1, vacc1p1);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p1 = math_muladd_f32(vi23x0, vk23x0, vacc0p1);
const float vk23x1 = w[49];
vacc1p1 = math_muladd_f32(vi23x1, vk23x1, vacc1p1);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
      // Add the second accumulator chains into vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
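      // Second accumulator chain for the remainder channel as well.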
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
      // Add the second accumulator chain into vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,506 | 30.076152 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_25p2c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
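    // Packed weights per 2-channel group: 2 biases followed by 25 taps x 2
    // channels, interleaved by channel -- 52 floats in total (w += 52 below).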
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p0 = math_muladd_f32(vi9x0, vk9x0, vacc0p0);
const float vk9x1 = w[21];
vacc1p0 = math_muladd_f32(vi9x1, vk9x1, vacc1p0);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p0 = math_muladd_f32(vi11x0, vk11x0, vacc0p0);
const float vk11x1 = w[25];
vacc1p0 = math_muladd_f32(vi11x1, vk11x1, vacc1p0);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p0 = math_muladd_f32(vi13x0, vk13x0, vacc0p0);
const float vk13x1 = w[29];
vacc1p0 = math_muladd_f32(vi13x1, vk13x1, vacc1p0);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p0 = math_muladd_f32(vi15x0, vk15x0, vacc0p0);
const float vk15x1 = w[33];
vacc1p0 = math_muladd_f32(vi15x1, vk15x1, vacc1p0);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p0 = math_muladd_f32(vi17x0, vk17x0, vacc0p0);
const float vk17x1 = w[37];
vacc1p0 = math_muladd_f32(vi17x1, vk17x1, vacc1p0);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p0 = math_muladd_f32(vi19x0, vk19x0, vacc0p0);
const float vk19x1 = w[41];
vacc1p0 = math_muladd_f32(vi19x1, vk19x1, vacc1p0);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p0 = math_muladd_f32(vi21x0, vk21x0, vacc0p0);
const float vk21x1 = w[45];
vacc1p0 = math_muladd_f32(vi21x1, vk21x1, vacc1p0);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p0 = math_muladd_f32(vi23x0, vk23x0, vacc0p0);
const float vk23x1 = w[49];
vacc1p0 = math_muladd_f32(vi23x1, vk23x1, vacc1p0);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
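      // Clamp via the WAsm builtins, which lower to the f32.max/f32.min
      // instructions.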
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
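    // Handle the odd trailing channel, if any.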
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,363 | 30.101215 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_25p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p1 = math_muladd_f32(vi5x0, vk5x0, vacc0p1);
const float vk5x1 = w[13];
vacc1p1 = math_muladd_f32(vi5x1, vk5x1, vacc1p1);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p1 = math_muladd_f32(vi7x0, vk7x0, vacc0p1);
const float vk7x1 = w[17];
vacc1p1 = math_muladd_f32(vi7x1, vk7x1, vacc1p1);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p1 = math_muladd_f32(vi9x0, vk9x0, vacc0p1);
const float vk9x1 = w[21];
vacc1p1 = math_muladd_f32(vi9x1, vk9x1, vacc1p1);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p1 = math_muladd_f32(vi11x0, vk11x0, vacc0p1);
const float vk11x1 = w[25];
vacc1p1 = math_muladd_f32(vi11x1, vk11x1, vacc1p1);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p1 = math_muladd_f32(vi13x0, vk13x0, vacc0p1);
const float vk13x1 = w[29];
vacc1p1 = math_muladd_f32(vi13x1, vk13x1, vacc1p1);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p1 = math_muladd_f32(vi15x0, vk15x0, vacc0p1);
const float vk15x1 = w[33];
vacc1p1 = math_muladd_f32(vi15x1, vk15x1, vacc1p1);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p1 = math_muladd_f32(vi17x0, vk17x0, vacc0p1);
const float vk17x1 = w[37];
vacc1p1 = math_muladd_f32(vi17x1, vk17x1, vacc1p1);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p1 = math_muladd_f32(vi19x0, vk19x0, vacc0p1);
const float vk19x1 = w[41];
vacc1p1 = math_muladd_f32(vi19x1, vk19x1, vacc1p1);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p1 = math_muladd_f32(vi21x0, vk21x0, vacc0p1);
const float vk21x1 = w[45];
vacc1p1 = math_muladd_f32(vi21x1, vk21x1, vacc1p1);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p1 = math_muladd_f32(vi23x0, vk23x0, vacc0p1);
const float vk23x1 = w[49];
vacc1p1 = math_muladd_f32(vi23x1, vk23x1, vacc1p1);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
      // Add the second accumulator chains into vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p1 = math_muladd_f32(vi5, vk5, vacc0p1);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p1 = math_muladd_f32(vi7, vk7, vacc0p1);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p1 = math_muladd_f32(vi9, vk9, vacc0p1);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p1 = math_muladd_f32(vi11, vk11, vacc0p1);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p1 = math_muladd_f32(vi13, vk13, vacc0p1);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p1 = math_muladd_f32(vi15, vk15, vacc0p1);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p1 = math_muladd_f32(vi17, vk17, vacc0p1);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p1 = math_muladd_f32(vi19, vk19, vacc0p1);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p1 = math_muladd_f32(vi21, vk21, vacc0p1);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p1 = math_muladd_f32(vi23, vk23, vacc0p1);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
      // Add the second accumulator chain into vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,094 | 29.869121 | 76 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p2c-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_25p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
const float vi4x0 = i4[0];
const float vi4x1 = i4[1];
i4 += 2;
const float vk4x0 = w[10];
vacc0p0 = math_muladd_f32(vi4x0, vk4x0, vacc0p0);
const float vk4x1 = w[11];
vacc1p0 = math_muladd_f32(vi4x1, vk4x1, vacc1p0);
const float vi5x0 = i5[0];
const float vi5x1 = i5[1];
i5 += 2;
const float vk5x0 = w[12];
vacc0p0 = math_muladd_f32(vi5x0, vk5x0, vacc0p0);
const float vk5x1 = w[13];
vacc1p0 = math_muladd_f32(vi5x1, vk5x1, vacc1p0);
const float vi6x0 = i6[0];
const float vi6x1 = i6[1];
i6 += 2;
const float vk6x0 = w[14];
vacc0p0 = math_muladd_f32(vi6x0, vk6x0, vacc0p0);
const float vk6x1 = w[15];
vacc1p0 = math_muladd_f32(vi6x1, vk6x1, vacc1p0);
const float vi7x0 = i7[0];
const float vi7x1 = i7[1];
i7 += 2;
const float vk7x0 = w[16];
vacc0p0 = math_muladd_f32(vi7x0, vk7x0, vacc0p0);
const float vk7x1 = w[17];
vacc1p0 = math_muladd_f32(vi7x1, vk7x1, vacc1p0);
const float vi8x0 = i8[0];
const float vi8x1 = i8[1];
i8 += 2;
const float vk8x0 = w[18];
vacc0p0 = math_muladd_f32(vi8x0, vk8x0, vacc0p0);
const float vk8x1 = w[19];
vacc1p0 = math_muladd_f32(vi8x1, vk8x1, vacc1p0);
const float vi9x0 = i9[0];
const float vi9x1 = i9[1];
i9 += 2;
const float vk9x0 = w[20];
vacc0p0 = math_muladd_f32(vi9x0, vk9x0, vacc0p0);
const float vk9x1 = w[21];
vacc1p0 = math_muladd_f32(vi9x1, vk9x1, vacc1p0);
const float vi10x0 = i10[0];
const float vi10x1 = i10[1];
i10 += 2;
const float vk10x0 = w[22];
vacc0p0 = math_muladd_f32(vi10x0, vk10x0, vacc0p0);
const float vk10x1 = w[23];
vacc1p0 = math_muladd_f32(vi10x1, vk10x1, vacc1p0);
const float vi11x0 = i11[0];
const float vi11x1 = i11[1];
i11 += 2;
const float vk11x0 = w[24];
vacc0p0 = math_muladd_f32(vi11x0, vk11x0, vacc0p0);
const float vk11x1 = w[25];
vacc1p0 = math_muladd_f32(vi11x1, vk11x1, vacc1p0);
const float vi12x0 = i12[0];
const float vi12x1 = i12[1];
i12 += 2;
const float vk12x0 = w[26];
vacc0p0 = math_muladd_f32(vi12x0, vk12x0, vacc0p0);
const float vk12x1 = w[27];
vacc1p0 = math_muladd_f32(vi12x1, vk12x1, vacc1p0);
const float vi13x0 = i13[0];
const float vi13x1 = i13[1];
i13 += 2;
const float vk13x0 = w[28];
vacc0p0 = math_muladd_f32(vi13x0, vk13x0, vacc0p0);
const float vk13x1 = w[29];
vacc1p0 = math_muladd_f32(vi13x1, vk13x1, vacc1p0);
const float vi14x0 = i14[0];
const float vi14x1 = i14[1];
i14 += 2;
const float vk14x0 = w[30];
vacc0p0 = math_muladd_f32(vi14x0, vk14x0, vacc0p0);
const float vk14x1 = w[31];
vacc1p0 = math_muladd_f32(vi14x1, vk14x1, vacc1p0);
const float vi15x0 = i15[0];
const float vi15x1 = i15[1];
i15 += 2;
const float vk15x0 = w[32];
vacc0p0 = math_muladd_f32(vi15x0, vk15x0, vacc0p0);
const float vk15x1 = w[33];
vacc1p0 = math_muladd_f32(vi15x1, vk15x1, vacc1p0);
const float vi16x0 = i16[0];
const float vi16x1 = i16[1];
i16 += 2;
const float vk16x0 = w[34];
vacc0p0 = math_muladd_f32(vi16x0, vk16x0, vacc0p0);
const float vk16x1 = w[35];
vacc1p0 = math_muladd_f32(vi16x1, vk16x1, vacc1p0);
const float vi17x0 = i17[0];
const float vi17x1 = i17[1];
i17 += 2;
const float vk17x0 = w[36];
vacc0p0 = math_muladd_f32(vi17x0, vk17x0, vacc0p0);
const float vk17x1 = w[37];
vacc1p0 = math_muladd_f32(vi17x1, vk17x1, vacc1p0);
const float vi18x0 = i18[0];
const float vi18x1 = i18[1];
i18 += 2;
const float vk18x0 = w[38];
vacc0p0 = math_muladd_f32(vi18x0, vk18x0, vacc0p0);
const float vk18x1 = w[39];
vacc1p0 = math_muladd_f32(vi18x1, vk18x1, vacc1p0);
const float vi19x0 = i19[0];
const float vi19x1 = i19[1];
i19 += 2;
const float vk19x0 = w[40];
vacc0p0 = math_muladd_f32(vi19x0, vk19x0, vacc0p0);
const float vk19x1 = w[41];
vacc1p0 = math_muladd_f32(vi19x1, vk19x1, vacc1p0);
const float vi20x0 = i20[0];
const float vi20x1 = i20[1];
i20 += 2;
const float vk20x0 = w[42];
vacc0p0 = math_muladd_f32(vi20x0, vk20x0, vacc0p0);
const float vk20x1 = w[43];
vacc1p0 = math_muladd_f32(vi20x1, vk20x1, vacc1p0);
const float vi21x0 = i21[0];
const float vi21x1 = i21[1];
i21 += 2;
const float vk21x0 = w[44];
vacc0p0 = math_muladd_f32(vi21x0, vk21x0, vacc0p0);
const float vk21x1 = w[45];
vacc1p0 = math_muladd_f32(vi21x1, vk21x1, vacc1p0);
const float vi22x0 = i22[0];
const float vi22x1 = i22[1];
i22 += 2;
const float vk22x0 = w[46];
vacc0p0 = math_muladd_f32(vi22x0, vk22x0, vacc0p0);
const float vk22x1 = w[47];
vacc1p0 = math_muladd_f32(vi22x1, vk22x1, vacc1p0);
const float vi23x0 = i23[0];
const float vi23x1 = i23[1];
i23 += 2;
const float vk23x0 = w[48];
vacc0p0 = math_muladd_f32(vi23x0, vk23x0, vacc0p0);
const float vk23x1 = w[49];
vacc1p0 = math_muladd_f32(vi23x1, vk23x1, vacc1p0);
const float vi24x0 = i24[0];
const float vi24x1 = i24[1];
i24 += 2;
const float vk24x0 = w[50];
vacc0p0 = math_muladd_f32(vi24x0, vk24x0, vacc0p0);
const float vk24x1 = w[51];
vacc1p0 = math_muladd_f32(vi24x1, vk24x1, vacc1p0);
w += 52;
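      // Non-minmax variant: store the raw accumulators without clamping.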
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
const float vi4 = *i4++;
const float vk4 = w[9];
vacc0p0 = math_muladd_f32(vi4, vk4, vacc0p0);
const float vi5 = *i5++;
const float vk5 = w[11];
vacc0p0 = math_muladd_f32(vi5, vk5, vacc0p0);
const float vi6 = *i6++;
const float vk6 = w[13];
vacc0p0 = math_muladd_f32(vi6, vk6, vacc0p0);
const float vi7 = *i7++;
const float vk7 = w[15];
vacc0p0 = math_muladd_f32(vi7, vk7, vacc0p0);
const float vi8 = *i8++;
const float vk8 = w[17];
vacc0p0 = math_muladd_f32(vi8, vk8, vacc0p0);
const float vi9 = *i9++;
const float vk9 = w[19];
vacc0p0 = math_muladd_f32(vi9, vk9, vacc0p0);
const float vi10 = *i10++;
const float vk10 = w[21];
vacc0p0 = math_muladd_f32(vi10, vk10, vacc0p0);
const float vi11 = *i11++;
const float vk11 = w[23];
vacc0p0 = math_muladd_f32(vi11, vk11, vacc0p0);
const float vi12 = *i12++;
const float vk12 = w[25];
vacc0p0 = math_muladd_f32(vi12, vk12, vacc0p0);
const float vi13 = *i13++;
const float vk13 = w[27];
vacc0p0 = math_muladd_f32(vi13, vk13, vacc0p0);
const float vi14 = *i14++;
const float vk14 = w[29];
vacc0p0 = math_muladd_f32(vi14, vk14, vacc0p0);
const float vi15 = *i15++;
const float vk15 = w[31];
vacc0p0 = math_muladd_f32(vi15, vk15, vacc0p0);
const float vi16 = *i16++;
const float vk16 = w[33];
vacc0p0 = math_muladd_f32(vi16, vk16, vacc0p0);
const float vi17 = *i17++;
const float vk17 = w[35];
vacc0p0 = math_muladd_f32(vi17, vk17, vacc0p0);
const float vi18 = *i18++;
const float vk18 = w[37];
vacc0p0 = math_muladd_f32(vi18, vk18, vacc0p0);
const float vi19 = *i19++;
const float vk19 = w[39];
vacc0p0 = math_muladd_f32(vi19, vk19, vacc0p0);
const float vi20 = *i20++;
const float vk20 = w[41];
vacc0p0 = math_muladd_f32(vi20, vk20, vacc0p0);
const float vi21 = *i21++;
const float vk21 = w[43];
vacc0p0 = math_muladd_f32(vi21, vk21, vacc0p0);
const float vi22 = *i22++;
const float vk22 = w[45];
vacc0p0 = math_muladd_f32(vi22, vk22, vacc0p0);
const float vi23 = *i23++;
const float vk23 = w[47];
vacc0p0 = math_muladd_f32(vi23, vk23, vacc0p0);
const float vi24 = *i24++;
const float vk24 = w[49];
vacc0p0 = math_muladd_f32(vi24, vk24, vacc0p0);
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,951 | 29.892562 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
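    // Main loop: process 4 channels per iteration. The 25 taps alternate between
    // two accumulators (vacc0123p0/vacc0123p1) to shorten the multiply-add dependency chain.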
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
      // Add the two partial accumulators together into vacc0123p0.
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
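    // Remainder: 1-3 channels are left. Full vectors are loaded (the kernel is
    // annotated XNN_OOB_READS), but only the valid lanes are stored below.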
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
      // Add the two partial accumulators together into vacc0123p0.
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,996 | 36.4925 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
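    // Main loop: process 4 channels per iteration, accumulating all 25 taps
    // into a single chain of vmlaq_f32 multiply-adds.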
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
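    // Remainder: 1-3 channels are left. Full vectors are loaded (the kernel is
    // annotated XNN_OOB_READS), but only the valid lanes are stored below.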
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,789 | 36.348485 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
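    // Main loop: process 4 channels per iteration using fused multiply-adds
    // (vfmaq_f32), alternating between two accumulators to hide FMA latency.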
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
      // Add the two partial accumulators together into vacc0123p0.
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
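    // Remainder: 1-3 channels are left. Full vectors are loaded (the kernel is
    // annotated XNN_OOB_READS), but only the valid lanes are stored below.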
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
      // Add the two partial accumulators together into vacc0123p0.
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,999 | 36.5 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
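    // Main loop: process 4 channels per iteration, accumulating all 25 taps
    // into a single chain of vfmaq_f32 fused multiply-adds.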
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
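    // Remainder: 1-3 channels are left. Full vectors are loaded (the kernel is
    // annotated XNN_OOB_READS), but only the valid lanes are stored below.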
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,792 | 36.356061 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-sse-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
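    // Main loop: process channels in groups of 4. The packed weights hold 4
    // bias values followed by 25 taps x 4 channels (104 floats per group,
    // matching the w += 104 advance at the end of the loop body).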
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
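      // acc2 variant: a second accumulator takes every other tap, halving the
      // add dependency chain; both accumulators are summed before clamping.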
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
const __m128 vi9x0123 = _mm_loadu_ps(i9);
i9 += 4;
const __m128 vk9x0123 = _mm_load_ps(w + 40);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));
const __m128 vi10x0123 = _mm_loadu_ps(i10);
i10 += 4;
const __m128 vk10x0123 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
const __m128 vi11x0123 = _mm_loadu_ps(i11);
i11 += 4;
const __m128 vk11x0123 = _mm_load_ps(w + 48);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));
const __m128 vi12x0123 = _mm_loadu_ps(i12);
i12 += 4;
const __m128 vk12x0123 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
const __m128 vi13x0123 = _mm_loadu_ps(i13);
i13 += 4;
const __m128 vk13x0123 = _mm_load_ps(w + 56);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));
const __m128 vi14x0123 = _mm_loadu_ps(i14);
i14 += 4;
const __m128 vk14x0123 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
const __m128 vi15x0123 = _mm_loadu_ps(i15);
i15 += 4;
const __m128 vk15x0123 = _mm_load_ps(w + 64);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));
const __m128 vi16x0123 = _mm_loadu_ps(i16);
i16 += 4;
const __m128 vk16x0123 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
const __m128 vi17x0123 = _mm_loadu_ps(i17);
i17 += 4;
const __m128 vk17x0123 = _mm_load_ps(w + 72);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));
const __m128 vi18x0123 = _mm_loadu_ps(i18);
i18 += 4;
const __m128 vk18x0123 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
const __m128 vi19x0123 = _mm_loadu_ps(i19);
i19 += 4;
const __m128 vk19x0123 = _mm_load_ps(w + 80);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));
const __m128 vi20x0123 = _mm_loadu_ps(i20);
i20 += 4;
const __m128 vk20x0123 = _mm_load_ps(w + 84);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
const __m128 vi21x0123 = _mm_loadu_ps(i21);
i21 += 4;
const __m128 vk21x0123 = _mm_load_ps(w + 88);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));
const __m128 vi22x0123 = _mm_loadu_ps(i22);
i22 += 4;
const __m128 vk22x0123 = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
const __m128 vi23x0123 = _mm_loadu_ps(i23);
i23 += 4;
const __m128 vk23x0123 = _mm_load_ps(w + 96);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));
const __m128 vi24x0123 = _mm_loadu_ps(i24);
i24 += 4;
const __m128 vk24x0123 = _mm_load_ps(w + 100);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
w += 104;
      // Add up all accumulators into vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
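    // Remainder: 1-3 channels left. Full 4-lane loads may read past the valid
    // channels (hence XNN_OOB_READS); only the c valid lanes are stored below.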
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
const __m128 vi9x0123 = _mm_loadu_ps(i9);
const __m128 vk9x0123 = _mm_load_ps(w + 40);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));
const __m128 vi10x0123 = _mm_loadu_ps(i10);
const __m128 vk10x0123 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
const __m128 vi11x0123 = _mm_loadu_ps(i11);
const __m128 vk11x0123 = _mm_load_ps(w + 48);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));
const __m128 vi12x0123 = _mm_loadu_ps(i12);
const __m128 vk12x0123 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
const __m128 vi13x0123 = _mm_loadu_ps(i13);
const __m128 vk13x0123 = _mm_load_ps(w + 56);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));
const __m128 vi14x0123 = _mm_loadu_ps(i14);
const __m128 vk14x0123 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
const __m128 vi15x0123 = _mm_loadu_ps(i15);
const __m128 vk15x0123 = _mm_load_ps(w + 64);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));
const __m128 vi16x0123 = _mm_loadu_ps(i16);
const __m128 vk16x0123 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
const __m128 vi17x0123 = _mm_loadu_ps(i17);
const __m128 vk17x0123 = _mm_load_ps(w + 72);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));
const __m128 vi18x0123 = _mm_loadu_ps(i18);
const __m128 vk18x0123 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
const __m128 vi19x0123 = _mm_loadu_ps(i19);
const __m128 vk19x0123 = _mm_load_ps(w + 80);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));
const __m128 vi20x0123 = _mm_loadu_ps(i20);
const __m128 vk20x0123 = _mm_load_ps(w + 84);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
const __m128 vi21x0123 = _mm_loadu_ps(i21);
const __m128 vk21x0123 = _mm_load_ps(w + 88);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));
const __m128 vi22x0123 = _mm_loadu_ps(i22);
const __m128 vk22x0123 = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
const __m128 vi23x0123 = _mm_loadu_ps(i23);
const __m128 vk23x0123 = _mm_load_ps(w + 96);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));
const __m128 vi24x0123 = _mm_loadu_ps(i24);
const __m128 vk24x0123 = _mm_load_ps(w + 100);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
      // Add up all accumulators into vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,352 | 32.966814 | 89 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
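    // Main loop: 4 channels per iteration with a single accumulator; weights
    // are packed as 4 bias values plus 25 taps x 4 channels (104 floats).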
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
i7 += 4;
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
i8 += 4;
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
const __m128 vi9x0123 = _mm_loadu_ps(i9);
i9 += 4;
const __m128 vk9x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi9x0123, vk9x0123));
const __m128 vi10x0123 = _mm_loadu_ps(i10);
i10 += 4;
const __m128 vk10x0123 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
const __m128 vi11x0123 = _mm_loadu_ps(i11);
i11 += 4;
const __m128 vk11x0123 = _mm_load_ps(w + 48);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi11x0123, vk11x0123));
const __m128 vi12x0123 = _mm_loadu_ps(i12);
i12 += 4;
const __m128 vk12x0123 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
const __m128 vi13x0123 = _mm_loadu_ps(i13);
i13 += 4;
const __m128 vk13x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi13x0123, vk13x0123));
const __m128 vi14x0123 = _mm_loadu_ps(i14);
i14 += 4;
const __m128 vk14x0123 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
const __m128 vi15x0123 = _mm_loadu_ps(i15);
i15 += 4;
const __m128 vk15x0123 = _mm_load_ps(w + 64);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi15x0123, vk15x0123));
const __m128 vi16x0123 = _mm_loadu_ps(i16);
i16 += 4;
const __m128 vk16x0123 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
const __m128 vi17x0123 = _mm_loadu_ps(i17);
i17 += 4;
const __m128 vk17x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi17x0123, vk17x0123));
const __m128 vi18x0123 = _mm_loadu_ps(i18);
i18 += 4;
const __m128 vk18x0123 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
const __m128 vi19x0123 = _mm_loadu_ps(i19);
i19 += 4;
const __m128 vk19x0123 = _mm_load_ps(w + 80);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi19x0123, vk19x0123));
const __m128 vi20x0123 = _mm_loadu_ps(i20);
i20 += 4;
const __m128 vk20x0123 = _mm_load_ps(w + 84);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
const __m128 vi21x0123 = _mm_loadu_ps(i21);
i21 += 4;
const __m128 vk21x0123 = _mm_load_ps(w + 88);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi21x0123, vk21x0123));
const __m128 vi22x0123 = _mm_loadu_ps(i22);
i22 += 4;
const __m128 vk22x0123 = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
const __m128 vi23x0123 = _mm_loadu_ps(i23);
i23 += 4;
const __m128 vk23x0123 = _mm_load_ps(w + 96);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi23x0123, vk23x0123));
const __m128 vi24x0123 = _mm_loadu_ps(i24);
i24 += 4;
const __m128 vk24x0123 = _mm_load_ps(w + 100);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
w += 104;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
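    // Remainder: handle the last 1-3 channels with partial stores; the 4-lane
    // loads may overread (XNN_OOB_READS), but only valid lanes are written.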
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
const __m128 vi4x0123 = _mm_loadu_ps(i4);
const __m128 vk4x0123 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
const __m128 vi5x0123 = _mm_loadu_ps(i5);
const __m128 vk5x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi5x0123, vk5x0123));
const __m128 vi6x0123 = _mm_loadu_ps(i6);
const __m128 vk6x0123 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
const __m128 vi7x0123 = _mm_loadu_ps(i7);
const __m128 vk7x0123 = _mm_load_ps(w + 32);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi7x0123, vk7x0123));
const __m128 vi8x0123 = _mm_loadu_ps(i8);
const __m128 vk8x0123 = _mm_load_ps(w + 36);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
const __m128 vi9x0123 = _mm_loadu_ps(i9);
const __m128 vk9x0123 = _mm_load_ps(w + 40);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi9x0123, vk9x0123));
const __m128 vi10x0123 = _mm_loadu_ps(i10);
const __m128 vk10x0123 = _mm_load_ps(w + 44);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
const __m128 vi11x0123 = _mm_loadu_ps(i11);
const __m128 vk11x0123 = _mm_load_ps(w + 48);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi11x0123, vk11x0123));
const __m128 vi12x0123 = _mm_loadu_ps(i12);
const __m128 vk12x0123 = _mm_load_ps(w + 52);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
const __m128 vi13x0123 = _mm_loadu_ps(i13);
const __m128 vk13x0123 = _mm_load_ps(w + 56);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi13x0123, vk13x0123));
const __m128 vi14x0123 = _mm_loadu_ps(i14);
const __m128 vk14x0123 = _mm_load_ps(w + 60);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
const __m128 vi15x0123 = _mm_loadu_ps(i15);
const __m128 vk15x0123 = _mm_load_ps(w + 64);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi15x0123, vk15x0123));
const __m128 vi16x0123 = _mm_loadu_ps(i16);
const __m128 vk16x0123 = _mm_load_ps(w + 68);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
const __m128 vi17x0123 = _mm_loadu_ps(i17);
const __m128 vk17x0123 = _mm_load_ps(w + 72);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi17x0123, vk17x0123));
const __m128 vi18x0123 = _mm_loadu_ps(i18);
const __m128 vk18x0123 = _mm_load_ps(w + 76);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
const __m128 vi19x0123 = _mm_loadu_ps(i19);
const __m128 vk19x0123 = _mm_load_ps(w + 80);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi19x0123, vk19x0123));
const __m128 vi20x0123 = _mm_loadu_ps(i20);
const __m128 vk20x0123 = _mm_load_ps(w + 84);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
const __m128 vi21x0123 = _mm_loadu_ps(i21);
const __m128 vk21x0123 = _mm_load_ps(w + 88);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi21x0123, vk21x0123));
const __m128 vi22x0123 = _mm_loadu_ps(i22);
const __m128 vk22x0123 = _mm_load_ps(w + 92);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
const __m128 vi23x0123 = _mm_loadu_ps(i23);
const __m128 vk23x0123 = _mm_load_ps(w + 96);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi23x0123, vk23x0123));
const __m128 vi24x0123 = _mm_loadu_ps(i24);
const __m128 vk24x0123 = _mm_load_ps(w + 100);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,177 | 32.879464 | 89 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmrelaxedsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
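  // Clamping below uses relaxed min/max, whose behavior on NaN and signed-zero
  // inputs is implementation-defined; that is acceptable for min/max bounds.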
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
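    // Main loop: 4 channels per iteration with two accumulators (acc2) to
    // shorten the add dependency chain; weights are packed as 4 bias values
    // plus 25 taps x 4 channels (104 floats per group).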
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
      // Add up all accumulators into vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
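    // Remainder: 1-3 channels left; full 128-bit loads may overread
    // (XNN_OOB_READS), and the 64-/32-bit lane stores below write only the
    // valid lanes.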
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
      // Add up all accumulators into vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,178 | 34.715232 | 89 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmrelaxedsimd-fma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
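    // Main loop: 4 channels per iteration, two accumulators, and relaxed
    // multiply-add, which may or may not fuse into an FMA depending on the
    // implementation.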
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
w += 104;
      // Add up all accumulators into vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
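    // Remainder: last 1-3 channels; loads may overread (XNN_OOB_READS) and
    // the partial lane stores below write only the valid channels.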
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
      // Add up all accumulators into vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,326 | 35.041943 | 89 | c |

| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
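// Main loop: 4 channels per iteration. Weights are packed as 4 bias floats followed by
// 25 groups of 4 filter taps (104 floats per step); each tap is fused into the
// accumulator with a relaxed FMA (__builtin_wasm_relaxed_madd_f32x4).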
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
w += 104;
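// Clamp to [min, max] with relaxed min/max, which trade fully-specified NaN
// handling for speed.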
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
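// Remainder: 1 to 3 trailing channels. Full 16-byte loads are safe because the kernel
// is declared XNN_OOB_READS; the stores below are masked to the remaining channel count.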
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmrelaxedsimd.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
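// The params block stores min/max as pairs of identical floats, so a single
// 64-bit splat load broadcasts each bound across the vector.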
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
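// Same structure as the relaxed-FMA variant above, but each tap is a separate
// multiply and add; only the final clamp uses relaxed min/max.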
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmsimd-arm-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
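// acc2 variant: taps alternate between two accumulators (vacc0123p0, vacc0123p1)
// to shorten the add dependency chain; the two are summed before clamping.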
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
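// Remainder channels follow the same two-accumulator scheme, with masked stores
// for the final 1 to 3 channels.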
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmsimd-arm.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
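// _arm variant: single accumulator, and the clamp uses the fully-specified
// wasm_f32x4_min/max, which are cheap on ARM targets.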
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmsimd-x86-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
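    // Main loop: 4 channels per iteration. Partial products alternate between
    // two accumulators (vacc0123p0/vacc0123p1) to shorten the dependency
    // chain; the accumulators are summed once at the end before clamping.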
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
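    // Remainder of 1-3 channels: full 16-byte vectors are still loaded (the
    // kernel is declared XNN_OOB_READS, so reading slightly past the last
    // channel is permitted) and only the valid lanes are stored.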
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p1);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p1);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p1);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p1);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p1);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p1);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p1);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p1);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p1);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p1);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
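      // Scalar tail stores: write 2 lanes, shuffle the remaining lanes down,
      // then write 1 lane, covering any c in {1, 2, 3}.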
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,107 | 34.558499 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p4c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
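  // Same computation as the _acc2 variant above, but accumulating into a
  // single register. The _x86 flavor clamps with wasm_f32x4_pmax/pmin, whose
  // NaN/ordering semantics map to the single-instruction MAXPS/MINPS on x86.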
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
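    // Main loop: 4 channels per iteration, 25 taps chained through one
    // accumulator with separate mul and add (each partial product is rounded
    // to f32 before accumulation).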
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
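    // Remainder path: repeats the 25-tap chain without advancing the input
    // pointers, then narrows the store to the c leftover channels below.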
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,932 | 34.485523 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_25p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
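  // This variant takes xnn_f32_default_params: the accumulated results are
  // stored as-is, with no min/max clamping.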
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
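    // Main loop: 4 channels per iteration using relaxed-SIMD multiply-add.
    // Whether __builtin_wasm_relaxed_madd_f32x4 fuses (single rounding) is
    // implementation-defined, so results may differ between engines; that is
    // the speed-for-determinism trade-off of the relaxed-simd proposal.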
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
w += 104;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi4x0123, vk4x0123, vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi5x0123, vk5x0123, vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi6x0123, vk6x0123, vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi7x0123, vk7x0123, vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi8x0123, vk8x0123, vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi9x0123, vk9x0123, vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi10x0123, vk10x0123, vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi11x0123, vk11x0123, vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi12x0123, vk12x0123, vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi13x0123, vk13x0123, vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi14x0123, vk14x0123, vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi15x0123, vk15x0123, vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi16x0123, vk16x0123, vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi17x0123, vk17x0123, vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi18x0123, vk18x0123, vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi19x0123, vk19x0123, vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi20x0123, vk20x0123, vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi21x0123, vk21x0123, vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi22x0123, vk22x0123, vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi23x0123, vk23x0123, vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi24x0123, vk24x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,806 | 34.601351 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p4c-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_25p4c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
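    // Main loop: same structure as the relaxed-FMA variant above, but built
    // from plain wasm_f32x4_mul + wasm_f32x4_add, so every step is exactly
    // rounded and results are deterministic across engines.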
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
i4 += 4;
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
i5 += 4;
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
i6 += 4;
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
i7 += 4;
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
i8 += 4;
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
i9 += 4;
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
i10 += 4;
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
i11 += 4;
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
i12 += 4;
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
i13 += 4;
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
i14 += 4;
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
i15 += 4;
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
i16 += 4;
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
i17 += 4;
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
i18 += 4;
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
i19 += 4;
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
i20 += 4;
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
i21 += 4;
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
i22 += 4;
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
i23 += 4;
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
i24 += 4;
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
w += 104;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
const v128_t vi4x0123 = wasm_v128_load(i4);
const v128_t vk4x0123 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi4x0123, vk4x0123), vacc0123p0);
const v128_t vi5x0123 = wasm_v128_load(i5);
const v128_t vk5x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi5x0123, vk5x0123), vacc0123p0);
const v128_t vi6x0123 = wasm_v128_load(i6);
const v128_t vk6x0123 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi6x0123, vk6x0123), vacc0123p0);
const v128_t vi7x0123 = wasm_v128_load(i7);
const v128_t vk7x0123 = wasm_v128_load(w + 32);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi7x0123, vk7x0123), vacc0123p0);
const v128_t vi8x0123 = wasm_v128_load(i8);
const v128_t vk8x0123 = wasm_v128_load(w + 36);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi8x0123, vk8x0123), vacc0123p0);
const v128_t vi9x0123 = wasm_v128_load(i9);
const v128_t vk9x0123 = wasm_v128_load(w + 40);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi9x0123, vk9x0123), vacc0123p0);
const v128_t vi10x0123 = wasm_v128_load(i10);
const v128_t vk10x0123 = wasm_v128_load(w + 44);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi10x0123, vk10x0123), vacc0123p0);
const v128_t vi11x0123 = wasm_v128_load(i11);
const v128_t vk11x0123 = wasm_v128_load(w + 48);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi11x0123, vk11x0123), vacc0123p0);
const v128_t vi12x0123 = wasm_v128_load(i12);
const v128_t vk12x0123 = wasm_v128_load(w + 52);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi12x0123, vk12x0123), vacc0123p0);
const v128_t vi13x0123 = wasm_v128_load(i13);
const v128_t vk13x0123 = wasm_v128_load(w + 56);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi13x0123, vk13x0123), vacc0123p0);
const v128_t vi14x0123 = wasm_v128_load(i14);
const v128_t vk14x0123 = wasm_v128_load(w + 60);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi14x0123, vk14x0123), vacc0123p0);
const v128_t vi15x0123 = wasm_v128_load(i15);
const v128_t vk15x0123 = wasm_v128_load(w + 64);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi15x0123, vk15x0123), vacc0123p0);
const v128_t vi16x0123 = wasm_v128_load(i16);
const v128_t vk16x0123 = wasm_v128_load(w + 68);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi16x0123, vk16x0123), vacc0123p0);
const v128_t vi17x0123 = wasm_v128_load(i17);
const v128_t vk17x0123 = wasm_v128_load(w + 72);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi17x0123, vk17x0123), vacc0123p0);
const v128_t vi18x0123 = wasm_v128_load(i18);
const v128_t vk18x0123 = wasm_v128_load(w + 76);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi18x0123, vk18x0123), vacc0123p0);
const v128_t vi19x0123 = wasm_v128_load(i19);
const v128_t vk19x0123 = wasm_v128_load(w + 80);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi19x0123, vk19x0123), vacc0123p0);
const v128_t vi20x0123 = wasm_v128_load(i20);
const v128_t vk20x0123 = wasm_v128_load(w + 84);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi20x0123, vk20x0123), vacc0123p0);
const v128_t vi21x0123 = wasm_v128_load(i21);
const v128_t vk21x0123 = wasm_v128_load(w + 88);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi21x0123, vk21x0123), vacc0123p0);
const v128_t vi22x0123 = wasm_v128_load(i22);
const v128_t vk22x0123 = wasm_v128_load(w + 92);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi22x0123, vk22x0123), vacc0123p0);
const v128_t vi23x0123 = wasm_v128_load(i23);
const v128_t vk23x0123 = wasm_v128_load(w + 96);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi23x0123, vk23x0123), vacc0123p0);
const v128_t vi24x0123 = wasm_v128_load(i24);
const v128_t vk24x0123 = wasm_v128_load(w + 100);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi24x0123, vk24x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,645 | 34.238739 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
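  // AVX variant: 8 channels per 256-bit register. Weights are loaded with the
  // aligned _mm256_load_ps, so the packed weight buffer is expected to be
  // 32-byte aligned; input rows use unaligned _mm256_loadu_ps.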
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
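    // Main loop: 8 channels per iteration with the same two-accumulator
    // (acc2) scheme as the wasmsimd kernel above: even taps accumulate into
    // vacc01234567p0, odd taps into vacc01234567p1.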
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
i9 += 8;
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi9x01234567, vk9x01234567));
const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
i10 += 8;
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi10x01234567, vk10x01234567));
const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
i11 += 8;
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi11x01234567, vk11x01234567));
const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
i12 += 8;
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi12x01234567, vk12x01234567));
const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
i13 += 8;
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi13x01234567, vk13x01234567));
const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
i14 += 8;
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi14x01234567, vk14x01234567));
const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
i15 += 8;
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi15x01234567, vk15x01234567));
const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
i16 += 8;
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi16x01234567, vk16x01234567));
const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
i17 += 8;
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi17x01234567, vk17x01234567));
const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
i18 += 8;
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi18x01234567, vk18x01234567));
const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
i19 += 8;
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi19x01234567, vk19x01234567));
const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
i20 += 8;
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi20x01234567, vk20x01234567));
const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
i21 += 8;
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi21x01234567, vk21x01234567));
const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
i22 += 8;
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi22x01234567, vk22x01234567));
const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
i23 += 8;
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi23x01234567, vk23x01234567));
const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
i24 += 8;
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi24x01234567, vk24x01234567));
w += 208;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
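    // Remainder: 1-7 trailing channels. A per-lane mask from mask_table
    // limits the input loads to the valid channels.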
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi9x01234567, vk9x01234567));
const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi10x01234567, vk10x01234567));
const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi11x01234567, vk11x01234567));
const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi12x01234567, vk12x01234567));
const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi13x01234567, vk13x01234567));
const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi14x01234567, vk14x01234567));
const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi15x01234567, vk15x01234567));
const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi16x01234567, vk16x01234567));
const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi17x01234567, vk17x01234567));
const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi18x01234567, vk18x01234567));
const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi19x01234567, vk19x01234567));
const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi20x01234567, vk20x01234567));
const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi21x01234567, vk21x01234567));
const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi22x01234567, vk22x01234567));
const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi23x01234567, vk23x01234567));
const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi24x01234567, vk24x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
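      // Store the remaining 1-7 results in 4-, 2- and 1-element pieces.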
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,875 | 37.692641 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
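    // One iteration per output pixel: fetch the 25 input row pointers,
    // advancing each by input_offset unless it aliases the shared zero
    // buffer used for padding.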
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
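    // Main loop: 8 channels per iteration with a single accumulator chain;
    // weights are packed as 8 biases followed by 25x8 coefficients.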
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
i9 += 8;
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi9x01234567, vk9x01234567));
const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
i10 += 8;
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi10x01234567, vk10x01234567));
const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
i11 += 8;
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi11x01234567, vk11x01234567));
const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
i12 += 8;
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi12x01234567, vk12x01234567));
const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
i13 += 8;
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi13x01234567, vk13x01234567));
const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
i14 += 8;
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi14x01234567, vk14x01234567));
const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
i15 += 8;
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi15x01234567, vk15x01234567));
const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
i16 += 8;
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi16x01234567, vk16x01234567));
const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
i17 += 8;
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi17x01234567, vk17x01234567));
const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
i18 += 8;
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi18x01234567, vk18x01234567));
const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
i19 += 8;
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi19x01234567, vk19x01234567));
const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
i20 += 8;
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi20x01234567, vk20x01234567));
const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
i21 += 8;
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi21x01234567, vk21x01234567));
const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
i22 += 8;
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi22x01234567, vk22x01234567));
const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
i23 += 8;
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi23x01234567, vk23x01234567));
const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
i24 += 8;
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi24x01234567, vk24x01234567));
w += 208;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
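    // Remainder: masked loads cover the last 1-7 channels.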
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi9x01234567, vk9x01234567));
const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi10x01234567, vk10x01234567));
const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi11x01234567, vk11x01234567));
const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi12x01234567, vk12x01234567));
const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi13x01234567, vk13x01234567));
const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi14x01234567, vk14x01234567));
const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi15x01234567, vk15x01234567));
const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi16x01234567, vk16x01234567));
const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi17x01234567, vk17x01234567));
const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi18x01234567, vk18x01234567));
const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi19x01234567, vk19x01234567));
const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi20x01234567, vk20x01234567));
const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi21x01234567, vk21x01234567));
const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi22x01234567, vk22x01234567));
const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi23x01234567, vk23x01234567));
const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi24x01234567, vk24x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,676 | 37.59607 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
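    // Main loop: same 8-channel weight layout, but the multiply-accumulate
    // steps use fused multiply-add and alternate between two accumulators.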
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
i9 += 8;
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p1 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p1);
const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
i10 += 8;
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);
const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
i11 += 8;
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p1);
const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
i12 += 8;
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);
const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
i13 += 8;
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p1 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p1);
const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
i14 += 8;
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);
const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
i15 += 8;
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p1);
const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
i16 += 8;
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);
const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
i17 += 8;
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p1 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p1);
const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
i18 += 8;
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);
const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
i19 += 8;
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p1 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p1);
const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
i20 += 8;
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);
const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
i21 += 8;
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p1 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p1);
const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
i22 += 8;
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);
const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
i23 += 8;
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p1 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p1);
const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
i24 += 8;
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);
w += 208;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
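    // Remainder: 1-7 channels via masked loads; same FMA schedule as above.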
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p1 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p1);
const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);
const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p1 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p1);
const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);
const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p1 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p1);
const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);
const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p1 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p1);
const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);
const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p1 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p1);
const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);
const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p1 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p1);
const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);
const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p1 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p1);
const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);
const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p1 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p1);
const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,252 | 36.344156 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-fma3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
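    // Main loop: 8 channels per iteration, all 25 taps folded into a single
    // accumulator with fused multiply-adds.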
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
i4 += 8;
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
i5 += 8;
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
i6 += 8;
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
i7 += 8;
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
i8 += 8;
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
i9 += 8;
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p0);
const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
i10 += 8;
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);
const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
i11 += 8;
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p0);
const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
i12 += 8;
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);
const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
i13 += 8;
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p0);
const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
i14 += 8;
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);
const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
i15 += 8;
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p0);
const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
i16 += 8;
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);
const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
i17 += 8;
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p0);
const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
i18 += 8;
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);
const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
i19 += 8;
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p0 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p0);
const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
i20 += 8;
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);
const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
i21 += 8;
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p0 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p0);
const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
i22 += 8;
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);
const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
i23 += 8;
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p0 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p0);
const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
i24 += 8;
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);
w += 208;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
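    // Remainder: masked loads handle the final 1-7 channels.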
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
const __m256 vk4x01234567 = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
const __m256 vk5x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p0);
const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
const __m256 vk6x01234567 = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
const __m256 vk7x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p0);
const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
const __m256 vk8x01234567 = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
const __m256 vk9x01234567 = _mm256_load_ps(w + 80);
vacc01234567p0 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p0);
const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
const __m256 vk10x01234567 = _mm256_load_ps(w + 88);
vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);
const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
const __m256 vk11x01234567 = _mm256_load_ps(w + 96);
vacc01234567p0 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p0);
const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
const __m256 vk12x01234567 = _mm256_load_ps(w + 104);
vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);
const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
const __m256 vk13x01234567 = _mm256_load_ps(w + 112);
vacc01234567p0 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p0);
const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
const __m256 vk14x01234567 = _mm256_load_ps(w + 120);
vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);
const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
const __m256 vk15x01234567 = _mm256_load_ps(w + 128);
vacc01234567p0 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p0);
const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
const __m256 vk16x01234567 = _mm256_load_ps(w + 136);
vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);
const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
const __m256 vk17x01234567 = _mm256_load_ps(w + 144);
vacc01234567p0 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p0);
const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
const __m256 vk18x01234567 = _mm256_load_ps(w + 152);
vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);
const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
const __m256 vk19x01234567 = _mm256_load_ps(w + 160);
vacc01234567p0 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p0);
const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
const __m256 vk20x01234567 = _mm256_load_ps(w + 168);
vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);
const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
const __m256 vk21x01234567 = _mm256_load_ps(w + 176);
vacc01234567p0 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p0);
const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
const __m256 vk22x01234567 = _mm256_load_ps(w + 184);
vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);
const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
const __m256 vk23x01234567 = _mm256_load_ps(w + 192);
vacc01234567p0 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p0);
const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
const __m256 vk24x01234567 = _mm256_load_ps(w + 200);
vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
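    // Store the remaining 1-7 channels with progressively narrower stores: 4 floats, then 2, then 1.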
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,027 | 36.179039 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
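    // Rows that alias the shared zero buffer keep pointing at it, so padding taps read zeros;
    // every other row gets the per-pixel channel offset applied.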
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
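    // Weights are packed per 8-channel group as 8 bias values followed by 25 taps of 8 weights each.
    // Two accumulator chains (p0, p1) run in parallel to hide vmlaq_f32 latency and are summed at the end.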
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vi9x4567 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk9x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi9x0123, vk9x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi9x4567, vk9x4567);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vi10x4567 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk10x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi10x4567, vk10x4567);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vi11x4567 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk11x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi11x0123, vk11x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi11x4567, vk11x4567);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vi12x4567 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk12x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi12x4567, vk12x4567);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vi13x4567 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk13x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi13x0123, vk13x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi13x4567, vk13x4567);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vi14x4567 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk14x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi14x4567, vk14x4567);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vi15x4567 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk15x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi15x0123, vk15x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi15x4567, vk15x4567);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vi16x4567 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk16x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi16x4567, vk16x4567);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vi17x4567 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk17x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi17x0123, vk17x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi17x4567, vk17x4567);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vi18x4567 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk18x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi18x4567, vk18x4567);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vi19x4567 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk19x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi19x0123, vk19x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi19x4567, vk19x4567);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vi20x4567 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk20x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi20x4567, vk20x4567);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vi21x4567 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk21x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi21x0123, vk21x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi21x4567, vk21x4567);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vi22x4567 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk22x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi22x4567, vk22x4567);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vi23x4567 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk23x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi23x0123, vk23x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi23x4567, vk23x4567);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vi24x4567 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk24x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi24x4567, vk24x4567);
      // Add up the two accumulator chains: vacc0123p1 into vacc0123p0, vacc4567p1 into vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
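    // 4-channel remainder: only the low half of each 8-wide weight group is read, so after the
    // 4-float bias load the taps sit at fixed offsets w + 4, w + 12, ..., w + 196.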
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w + 76);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w + 84);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w + 92);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w + 100);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w + 108);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w + 116);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w + 124);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w + 132);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w + 140);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w + 148);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w + 156);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w + 164);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w + 172);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w + 180);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w + 188);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w + 196);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
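    // 1-3 channel tail: full 4-wide loads are safe because the kernel is declared XNN_OOB_READS;
    // only the valid lanes are stored (2 floats, then 1).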
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w + 80);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w + 88);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w + 96);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w + 104);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w + 112);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w + 120);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w + 128);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w + 136);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w + 144);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w + 152);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w + 160);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w + 168);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w + 176);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w + 184);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w + 192);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w + 200);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,571 | 40.506757 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
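    // Single-accumulator variant: every tap is multiply-accumulated into one chain per 4-channel
    // half (no second accumulator as in the _acc2 kernels).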
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vi9x4567 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk9x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi9x0123, vk9x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi9x4567, vk9x4567);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vi10x4567 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk10x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi10x4567, vk10x4567);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vi11x4567 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk11x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi11x0123, vk11x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi11x4567, vk11x4567);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vi12x4567 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk12x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi12x4567, vk12x4567);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vi13x4567 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk13x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi13x0123, vk13x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi13x4567, vk13x4567);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vi14x4567 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk14x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi14x4567, vk14x4567);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vi15x4567 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk15x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi15x0123, vk15x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi15x4567, vk15x4567);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vi16x4567 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk16x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi16x4567, vk16x4567);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vi17x4567 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk17x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi17x0123, vk17x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi17x4567, vk17x4567);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vi18x4567 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk18x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi18x4567, vk18x4567);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vi19x4567 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk19x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi19x0123, vk19x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi19x4567, vk19x4567);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vi20x4567 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk20x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi20x4567, vk20x4567);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vi21x4567 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk21x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi21x0123, vk21x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi21x4567, vk21x4567);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vi22x4567 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk22x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi22x4567, vk22x4567);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vi23x4567 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk23x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi23x0123, vk23x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi23x4567, vk23x4567);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vi24x4567 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk24x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi24x4567, vk24x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
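    // 4-channel remainder with the same fixed weight offsets as the 8-wide packed layout (w + 4 ... w + 196).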
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w + 76);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w + 84);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w + 92);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w + 100);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w + 108);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w + 116);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w + 124);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w + 132);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w + 140);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w + 148);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w + 156);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w + 164);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w + 172);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w + 180);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w + 188);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w + 196);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
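    // 1-3 channel tail: compute a full 4-wide result (XNN_OOB_READS permits the over-read) and
    // store only the valid lanes.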
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w + 80);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w + 88);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w + 96);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w + 104);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w + 112);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w + 120);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w + 128);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w + 136);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w + 144);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w + 152);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w + 160);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w + 168);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w + 176);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w + 184);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w + 192);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w + 200);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,205 | 40.377778 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
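    // Same structure as the neon_acc2 kernel, but uses fused multiply-add (vfmaq_f32) in place of
    // vmlaq_f32; the two accumulator chains are summed before clamping.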
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vi9x4567 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk9x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi9x0123, vk9x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi9x4567, vk9x4567);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vi10x4567 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk10x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi10x4567, vk10x4567);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vi11x4567 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk11x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi11x0123, vk11x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi11x4567, vk11x4567);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vi12x4567 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk12x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi12x4567, vk12x4567);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vi13x4567 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk13x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi13x0123, vk13x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi13x4567, vk13x4567);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vi14x4567 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk14x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi14x4567, vk14x4567);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vi15x4567 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk15x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi15x0123, vk15x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi15x4567, vk15x4567);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vi16x4567 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk16x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi16x4567, vk16x4567);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vi17x4567 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk17x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi17x0123, vk17x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi17x4567, vk17x4567);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vi18x4567 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk18x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi18x4567, vk18x4567);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vi19x4567 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk19x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi19x0123, vk19x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi19x4567, vk19x4567);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vi20x4567 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk20x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi20x4567, vk20x4567);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vi21x4567 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk21x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi21x0123, vk21x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi21x4567, vk21x4567);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vi22x4567 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk22x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi22x4567, vk22x4567);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vi23x4567 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk23x4567 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi23x0123, vk23x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi23x4567, vk23x4567);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vi24x4567 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk24x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi24x4567, vk24x4567);
// Add up both partial-product accumulators into vacc0123p0 and vacc4567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
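// Note (editorial): the "acc2" in this kernel's name is the pair of partial
// accumulators per register group (vacc...p0 and vacc...p1), summed only once
// at the end. Alternating the 25 taps between two chains halves the FMA
// dependency depth, hiding fused-multiply-add latency on pipelined cores.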
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w + 76);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w + 84);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w + 92);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w + 100);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w + 108);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w + 116);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w + 124);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w + 132);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w + 140);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w + 148);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w + 156);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w + 164);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w + 172);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w + 180);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w + 188);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w + 196);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
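// Note (editorial): in this 4-channel tail only the bias load advances w, so
// each tap's weights are addressed at a fixed offset with an 8-float stride
// (w + 4, w + 12, ..., w + 196), skipping the unused half of every 8-channel
// packed weight group. The c < 4 remainder below reuses the same trick with
// offsets w + 8, w + 16, ..., which stay correct whether or not this loop ran.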
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w + 80);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w + 88);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w + 96);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w + 104);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w + 112);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w + 120);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w + 128);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w + 136);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w + 144);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w + 152);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w + 160);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w + 168);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w + 176);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w + 184);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w + 192);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w + 200);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
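// Note (editorial): the c < 4 remainder above still loads full 4-float
// vectors (the kernel is declared XNN_OOB_READS, so reading past the last
// valid channel is permitted) but stores only the valid lanes: two lanes when
// (c & 2) and one more when (c & 1).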
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,574 | 40.511824 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-25p8c-minmax-neonfma.c |
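// Editorial note: every kernel in this dump implements the same contract, a
// depthwise (per-channel) convolution over kernel_size pre-gathered input
// rows, clamped to [min, max]. A minimal scalar reference is sketched below
// for comparison; the function name, the flat bias-then-taps weight layout,
// and the absence of channel tiling are illustrative assumptions, not
// XNNPACK's API (the SIMD kernels pack weights in 8- or 16-channel groups).
#include <stddef.h>

static void dwconv_reference_one_pixel(
    size_t channels, size_t kernel_size,
    const float** input,   // kernel_size row pointers, each >= channels floats
    const float* weights,  // [bias[channels]][k0[channels]]...[k24[channels]]
    float* output, float output_min, float output_max)
{
  for (size_t c = 0; c < channels; c++) {
    float acc = weights[c];  // bias term
    for (size_t k = 0; k < kernel_size; k++) {
      acc += input[k][c] * weights[(k + 1) * channels + c];
    }
    // Same clamp the NEON kernels express as vmaxq_f32 then vminq_f32.
    if (acc < output_min) acc = output_min;
    if (acc > output_max) acc = output_max;
    output[c] = acc;
  }
}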
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_25p8c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
const float* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const float*) ((uintptr_t) i4 + input_offset);
}
const float* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const float*) ((uintptr_t) i5 + input_offset);
}
const float* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const float*) ((uintptr_t) i6 + input_offset);
}
const float* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const float*) ((uintptr_t) i7 + input_offset);
}
const float* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const float*) ((uintptr_t) i8 + input_offset);
}
const float* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const float*) ((uintptr_t) i9 + input_offset);
}
const float* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const float*) ((uintptr_t) i10 + input_offset);
}
const float* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const float*) ((uintptr_t) i11 + input_offset);
}
const float* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const float*) ((uintptr_t) i12 + input_offset);
}
const float* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const float*) ((uintptr_t) i13 + input_offset);
}
const float* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const float*) ((uintptr_t) i14 + input_offset);
}
const float* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const float*) ((uintptr_t) i15 + input_offset);
}
const float* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const float*) ((uintptr_t) i16 + input_offset);
}
const float* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const float*) ((uintptr_t) i17 + input_offset);
}
const float* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const float*) ((uintptr_t) i18 + input_offset);
}
const float* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const float*) ((uintptr_t) i19 + input_offset);
}
const float* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const float*) ((uintptr_t) i20 + input_offset);
}
const float* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const float*) ((uintptr_t) i21 + input_offset);
}
const float* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const float*) ((uintptr_t) i22 + input_offset);
}
const float* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const float*) ((uintptr_t) i23 + input_offset);
}
const float* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const float*) ((uintptr_t) i24 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
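// Note (editorial): "input" is an indirection buffer holding 25 row pointers
// per output pixel. Entries equal to "zero" reference a shared zero vector
// for padding and are deliberately left unrebased by input_offset, while
// input_stride (above) steps to the next pixel's 25 pointers.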
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi4x4567, vk4x4567);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi5x4567, vk5x4567);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi6x4567, vk6x4567);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi7x4567, vk7x4567);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi8x4567, vk8x4567);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vi9x4567 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk9x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi9x0123, vk9x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi9x4567, vk9x4567);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vi10x4567 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk10x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi10x4567, vk10x4567);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vi11x4567 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk11x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi11x0123, vk11x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi11x4567, vk11x4567);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vi12x4567 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk12x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi12x4567, vk12x4567);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vi13x4567 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk13x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi13x0123, vk13x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi13x4567, vk13x4567);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vi14x4567 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk14x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi14x4567, vk14x4567);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vi15x4567 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk15x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi15x0123, vk15x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi15x4567, vk15x4567);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vi16x4567 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk16x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi16x4567, vk16x4567);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vi17x4567 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk17x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi17x0123, vk17x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi17x4567, vk17x4567);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vi18x4567 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk18x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi18x4567, vk18x4567);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vi19x4567 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk19x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi19x0123, vk19x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi19x4567, vk19x4567);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vi20x4567 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk20x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi20x4567, vk20x4567);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vi21x4567 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk21x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi21x0123, vk21x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi21x4567, vk21x4567);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vi22x4567 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk22x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi22x4567, vk22x4567);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vi23x4567 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk23x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi23x0123, vk23x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi23x4567, vk23x4567);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vi24x4567 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk24x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi24x4567, vk24x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
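// Note (editorial): unlike the _acc2 variant earlier in this dump, this
// kernel keeps a single accumulator per register group, so all 25 fused
// multiply-adds form one serial dependency chain; it saves two live
// registers at the cost of less latency hiding.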
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
const float32x4_t vk4x0123 = vld1q_f32(w + 36);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
const float32x4_t vk5x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
const float32x4_t vk6x0123 = vld1q_f32(w + 52);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
const float32x4_t vk7x0123 = vld1q_f32(w + 60);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
const float32x4_t vk8x0123 = vld1q_f32(w + 68);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9); i9 += 4;
const float32x4_t vk9x0123 = vld1q_f32(w + 76);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10); i10 += 4;
const float32x4_t vk10x0123 = vld1q_f32(w + 84);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11); i11 += 4;
const float32x4_t vk11x0123 = vld1q_f32(w + 92);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12); i12 += 4;
const float32x4_t vk12x0123 = vld1q_f32(w + 100);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13); i13 += 4;
const float32x4_t vk13x0123 = vld1q_f32(w + 108);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14); i14 += 4;
const float32x4_t vk14x0123 = vld1q_f32(w + 116);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15); i15 += 4;
const float32x4_t vk15x0123 = vld1q_f32(w + 124);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16); i16 += 4;
const float32x4_t vk16x0123 = vld1q_f32(w + 132);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17); i17 += 4;
const float32x4_t vk17x0123 = vld1q_f32(w + 140);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18); i18 += 4;
const float32x4_t vk18x0123 = vld1q_f32(w + 148);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19); i19 += 4;
const float32x4_t vk19x0123 = vld1q_f32(w + 156);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20); i20 += 4;
const float32x4_t vk20x0123 = vld1q_f32(w + 164);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21); i21 += 4;
const float32x4_t vk21x0123 = vld1q_f32(w + 172);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22); i22 += 4;
const float32x4_t vk22x0123 = vld1q_f32(w + 180);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23); i23 += 4;
const float32x4_t vk23x0123 = vld1q_f32(w + 188);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24); i24 += 4;
const float32x4_t vk24x0123 = vld1q_f32(w + 196);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
const float32x4_t vi4x0123 = vld1q_f32(i4);
const float32x4_t vk4x0123 = vld1q_f32(w + 40);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);
const float32x4_t vi5x0123 = vld1q_f32(i5);
const float32x4_t vk5x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123);
const float32x4_t vi6x0123 = vld1q_f32(i6);
const float32x4_t vk6x0123 = vld1q_f32(w + 56);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);
const float32x4_t vi7x0123 = vld1q_f32(i7);
const float32x4_t vk7x0123 = vld1q_f32(w + 64);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123);
const float32x4_t vi8x0123 = vld1q_f32(i8);
const float32x4_t vk8x0123 = vld1q_f32(w + 72);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);
const float32x4_t vi9x0123 = vld1q_f32(i9);
const float32x4_t vk9x0123 = vld1q_f32(w + 80);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi9x0123, vk9x0123);
const float32x4_t vi10x0123 = vld1q_f32(i10);
const float32x4_t vk10x0123 = vld1q_f32(w + 88);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi10x0123, vk10x0123);
const float32x4_t vi11x0123 = vld1q_f32(i11);
const float32x4_t vk11x0123 = vld1q_f32(w + 96);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi11x0123, vk11x0123);
const float32x4_t vi12x0123 = vld1q_f32(i12);
const float32x4_t vk12x0123 = vld1q_f32(w + 104);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi12x0123, vk12x0123);
const float32x4_t vi13x0123 = vld1q_f32(i13);
const float32x4_t vk13x0123 = vld1q_f32(w + 112);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi13x0123, vk13x0123);
const float32x4_t vi14x0123 = vld1q_f32(i14);
const float32x4_t vk14x0123 = vld1q_f32(w + 120);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi14x0123, vk14x0123);
const float32x4_t vi15x0123 = vld1q_f32(i15);
const float32x4_t vk15x0123 = vld1q_f32(w + 128);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi15x0123, vk15x0123);
const float32x4_t vi16x0123 = vld1q_f32(i16);
const float32x4_t vk16x0123 = vld1q_f32(w + 136);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi16x0123, vk16x0123);
const float32x4_t vi17x0123 = vld1q_f32(i17);
const float32x4_t vk17x0123 = vld1q_f32(w + 144);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi17x0123, vk17x0123);
const float32x4_t vi18x0123 = vld1q_f32(i18);
const float32x4_t vk18x0123 = vld1q_f32(w + 152);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi18x0123, vk18x0123);
const float32x4_t vi19x0123 = vld1q_f32(i19);
const float32x4_t vk19x0123 = vld1q_f32(w + 160);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi19x0123, vk19x0123);
const float32x4_t vi20x0123 = vld1q_f32(i20);
const float32x4_t vk20x0123 = vld1q_f32(w + 168);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi20x0123, vk20x0123);
const float32x4_t vi21x0123 = vld1q_f32(i21);
const float32x4_t vk21x0123 = vld1q_f32(w + 176);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi21x0123, vk21x0123);
const float32x4_t vi22x0123 = vld1q_f32(i22);
const float32x4_t vk22x0123 = vld1q_f32(w + 184);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi22x0123, vk22x0123);
const float32x4_t vi23x0123 = vld1q_f32(i23);
const float32x4_t vk23x0123 = vld1q_f32(w + 192);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi23x0123, vk23x0123);
const float32x4_t vi24x0123 = vld1q_f32(i24);
const float32x4_t vk24x0123 = vld1q_f32(w + 200);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi24x0123, vk24x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,208 | 40.382906 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l16c4s4r-minmax-neon-acc2.c |
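// Editorial note: the remaining kernels are multipass variants. Unlike the
// unipass 25p8c kernels above, they fold in 2 taps per pass and park partial
// sums in a caller-provided scratch buffer, clamping only in the last pass.
// The scalar sketch below illustrates that schedule; the names and the flat
// weight layout are illustrative assumptions, not the kernels' packed format.
#include <stddef.h>

static void dwconv_multipass_sketch(
    size_t channels, size_t kernel_size,  // kernel_size > 2
    const float** input, const float* bias,
    const float* weights,                 // weights[k * channels + c]
    float* buffer, float* output, float output_min, float output_max)
{
  // First pass: bias plus taps 0 and 1, written to the scratch buffer.
  for (size_t c = 0; c < channels; c++) {
    buffer[c] = bias[c]
              + input[0][c] * weights[0 * channels + c]
              + input[1][c] * weights[1 * channels + c];
  }
  // Middle passes: two more taps each, accumulated in the buffer.
  size_t k = 2;
  for (; kernel_size - k > 2; k += 2) {
    for (size_t c = 0; c < channels; c++) {
      buffer[c] += input[k][c] * weights[k * channels + c]
                 + input[k + 1][c] * weights[(k + 1) * channels + c];
    }
  }
  // Last pass: the final 1-2 taps, then clamp and write the real output.
  for (size_t c = 0; c < channels; c++) {
    float acc = buffer[c];
    for (size_t kk = k; kk < kernel_size; kk++) {
      acc += input[kk][c] * weights[kk * channels + c];
    }
    if (acc < output_min) acc = output_min;
    if (acc > output_max) acc = output_max;
    output[c] = acc;
  }
}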
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l16c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
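// Note (editorial): the 2f2m2l16c4s4r suffix appears to encode the schedule:
// 2 taps in the first pass, 2 per middle pass, up to 2 in the last pass, a
// 16-channel main tile with 4-channel subtiles, and channels rounded up to a
// multiple of 4 in the scratch buffer (see round_up_po2 below).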
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
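// Note (editorial): the first and middle passes round the channel count up to
// a multiple of 4 because the scratch buffer is read and written in whole
// 4-float vectors; only the last pass uses the exact channel count and
// narrows the final stores.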
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
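// (Each middle pass reloads the partial sums from the scratch buffer, folds
// in two more taps, and stores them back, while the kernel weights keep
// streaming forward through w.)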
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
float32x4_t vacc89ABp0 = vld1q_f32(b + 8);
float32x4_t vaccCDEFp0 = vld1q_f32(b + 12);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
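// (The last pass folds in the remaining taps, applies the min/max clamp, and
// writes finished values to output instead of back to the buffer.)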
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
float32x4_t vacc89ABp0 = vld1q_f32(b); b += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,578 | 35.25072 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l16c4s4r-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l16c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
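// Note (editorial): this plain "neon" variant accumulates with vmlaq_f32,
// which rounds between the multiply and the add and is available on every
// NEON core, whereas the "neonfma" kernels use the fused vfmaq_f32 that
// requires the FMA extension (VFPv4 / ARMv8).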
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
float32x4_t vacc89ABp0 = vld1q_f32(b + 8);
float32x4_t vaccCDEFp0 = vld1q_f32(b + 12);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
float32x4_t vacc89ABp0 = vld1q_f32(b); b += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,334 | 33.984568 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l16c4s4r-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l16c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
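// Note (editorial): this variant combines the multipass 2f2m2l schedule with
// both earlier tricks: fused vfmaq_f32 accumulation and the acc2 pair of
// partial accumulators (p0/p1) that is summed once per tile.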
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
float32x4_t vacc89ABp0 = vld1q_f32(b + 8);
float32x4_t vaccCDEFp0 = vld1q_f32(b + 12);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
float32x4_t vacc89ABp0 = vld1q_f32(b); b += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
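// A minimal sketch of the acc2 pattern used above: the second tap starts an
// independent product chain (vmulq_f32) instead of extending the first FMA
// chain, and the two partial sums are merged with a single vaddq_f32 at the
// end. The helper name is illustrative, not part of the generated kernel.
#include <arm_neon.h>

static inline float32x4_t xnn_sketch_dot2_acc2(
    float32x4_t vbias,
    float32x4_t vi0, float32x4_t vk0,
    float32x4_t vi1, float32x4_t vk1)
{
  const float32x4_t vp0 = vfmaq_f32(vbias, vi0, vk0);  // chain 0: bias + i0*k0
  const float32x4_t vp1 = vmulq_f32(vi1, vk1);         // chain 1: i1*k1, independent
  return vaddq_f32(vp0, vp1);                          // merge the partial sums once
}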
| 12,581 | 35.259366 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l16c4s4r-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l16c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
float32x4_t vacc89ABp0 = vld1q_f32(b + 8);
float32x4_t vaccCDEFp0 = vld1q_f32(b + 12);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
vst1q_f32(b, vacc89ABp0); b += 4;
vst1q_f32(b, vaccCDEFp0); b += 4;
}
for (; c != 0; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0);
b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
float32x4_t vacc89ABp0 = vld1q_f32(b); b += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
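// The first and middle passes above walk round_up_po2(channels, 4) buffer
// elements, while only `channels` results leave the last pass, so the
// caller's scratch must cover the rounded-up count. A hypothetical helper
// (name and scope assumed for illustration):
#include <stddef.h>

static inline size_t xnn_sketch_dwconv_buffer_elements(size_t channels) {
  const size_t subtile = 4;                          // the c4s4r channel subtile
  return (channels + subtile - 1) & ~(subtile - 1);  // round_up_po2(channels, 4)
}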
| 11,337 | 33.993827 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l1c1s1r-minmax-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 2;
*b++ = vacc0p0;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
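// A compact scalar reference for the whole 2f2m2l schedule above, consuming
// weights in the same streaming order (bias, k0, k1 per channel in the first
// pass, then two taps per channel in each later pass). `rows` stands in for
// the per-tap input pointers; all names are illustrative, and kernel_size is
// assumed even and greater than 2, as the kernel asserts.
#include <stddef.h>

static void xnn_sketch_dwconv_2f2m2l_ref(
    size_t channels, size_t kernel_size,
    const float** rows, const float* w,
    float* buffer, float* out, float vmin, float vmax)
{
  for (size_t c = 0; c < channels; c++) {  // first pass: bias + taps 0-1
    buffer[c] = w[0] + rows[0][c] * w[1] + rows[1][c] * w[2];
    w += 3;
  }
  rows += 2;
  for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {  // middle passes
    for (size_t c = 0; c < channels; c++) {
      buffer[c] += rows[0][c] * w[0] + rows[1][c] * w[1];
      w += 2;
    }
    rows += 2;
  }
  for (size_t c = 0; c < channels; c++) {  // last pass: final taps, then clamp
    float vacc = buffer[c] + rows[0][c] * w[0] + rows[1][c] * w[1];
    w += 2;
    vacc = vacc < vmin ? vmin : vacc;      // math_max_f32(vacc, vmin)
    out[c] = vacc > vmax ? vmax : vacc;    // math_min_f32(vacc, vmax)
  }
}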
| 3,979 | 25.891892 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l1c1s1r-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 3;
*b++ = vacc0p0;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
*b++ = vacc0p0;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
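// math_muladd_f32 is declared in <xnnpack/math.h>. A plausible portable
// definition is sketched below -- fused when the compiler reports cheap
// native FMA, a plain multiply-add otherwise; the real header may differ.
#include <math.h>

static inline float xnn_sketch_muladd_f32(float x, float y, float acc) {
#if defined(__FP_FAST_FMAF)
  return fmaf(x, y, acc);  // single rounding on targets with native FMA
#else
  return x * y + acc;      // two roundings, but no library call
#endif
}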
| 3,782 | 25.640845 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l1c1s1r-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_2f2m2l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 2;
*b++ = vacc0p0;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,799 | 25.388889 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l1c1s1r-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_2f2m2l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 3;
*b++ = vacc0p0;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
*b++ = vacc0p0;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
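// The pointer budget of the schedule above, worked for kernel_size = 9: the
// first pass takes taps 0-1, the ks loop runs for ks = 7, 5, 3 and takes
// taps 2-3, 4-5, 6-7, and the last pass takes two more -- ten `input`
// entries in total for nine taps. For an odd kernel size the caller
// presumably pads the indirection list with the `zero` row so that the
// surplus tap contributes nothing. Helper name is hypothetical.
#include <stddef.h>

static size_t xnn_sketch_dwconv_2f2m2l_pointers(size_t kernel_size) {
  size_t n = 2;                                               // first pass
  for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) n += 2;  // middle passes
  return n + 2;                                               // last pass
}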
| 3,602 | 25.108696 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c1s1r-minmax-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
float vacc2p0 = w[2];
float vacc3p0 = w[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[4];
const float vk0x1 = w[5];
const float vk0x2 = w[6];
const float vk0x3 = w[7];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[8];
const float vk1x1 = w[9];
const float vk1x2 = w[10];
const float vk1x3 = w[11];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 12;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = w[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[1];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[2];
float vacc0p1 = vi1x0 * vk1x0;
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
b[0] = vacc0p0;
b += 1;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 8;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = b[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[0];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[1];
float vacc0p1 = vi1x0 * vk1x0;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
b[0] = vacc0p0;
b += 1;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
b += 4;
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 8;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
float vacc2 = math_max_f32(vacc2p0, vmin);
float vacc3 = math_max_f32(vacc3p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
vacc2 = math_min_f32(vacc2, vmax);
vacc3 = math_min_f32(vacc3, vmax);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
for (; c != 0; c --) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
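// round_up_po2 is XNNPACK's power-of-two round-up from <xnnpack/math.h>; the
// usual bit trick is sketched below. With this kernel's 1-element rounding
// the call is the identity, so the 4-channel main loop simply leaves any
// 1-3 trailing channels to the scalar remainder loop.
#include <stddef.h>

static inline size_t xnn_sketch_round_up_po2(size_t n, size_t q) {
  return (n + q - 1) & ~(q - 1);  // q must be a power of two
}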
| 8,987 | 26.154079 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c1s1r-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
float vacc2p0 = w[2];
float vacc3p0 = w[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[4];
const float vk0x1 = w[5];
const float vk0x2 = w[6];
const float vk0x3 = w[7];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[8];
const float vk1x1 = w[9];
const float vk1x2 = w[10];
const float vk1x3 = w[11];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 12;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = w[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[1];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[2];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
w += 3;
b[0] = vacc0p0;
b += 1;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 8;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = b[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[0];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[1];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
w += 2;
b[0] = vacc0p0;
b += 1;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
b += 4;
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 8;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
float vacc2 = math_max_f32(vacc2p0, vmin);
float vacc3 = math_max_f32(vacc3p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
vacc2 = math_min_f32(vacc2, vmax);
vacc3 = math_min_f32(vacc3, vmax);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
for (; c != 0; c --) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,427 | 26.187097 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c1s1r-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_2f2m2l4c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
float vacc2p0 = w[2];
float vacc3p0 = w[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[4];
const float vk0x1 = w[5];
const float vk0x2 = w[6];
const float vk0x3 = w[7];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[8];
const float vk1x1 = w[9];
const float vk1x2 = w[10];
const float vk1x3 = w[11];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 12;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = w[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[1];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[2];
float vacc0p1 = vi1x0 * vk1x0;
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
b[0] = vacc0p0;
b += 1;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 8;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = b[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[0];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[1];
float vacc0p1 = vi1x0 * vk1x0;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
b[0] = vacc0p0;
b += 1;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
b += 4;
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
float vacc0p1 = vi1x0 * vk1x0;
float vacc1p1 = vi1x1 * vk1x1;
float vacc2p1 = vi1x2 * vk1x2;
float vacc3p1 = vi1x3 * vk1x3;
w += 8;
// Add up all accumulators to vacc0p0..vacc3p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
vacc2p0 = vacc2p0 + vacc2p1;
vacc3p0 = vacc3p0 + vacc3p1;
output[0] = vacc0p0;
output[1] = vacc1p0;
output[2] = vacc2p0;
output[3] = vacc3p0;
output += 4;
}
for (; c != 0; c --) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
w += 2;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
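// The acc2 variants change the order of the final summation, and float
// addition is not associative, so acc2 and single-accumulator kernels can
// disagree in the last ulp. A standalone demonstration:
#include <stdio.h>

int main(void) {
  const float a = 1e8f, b = -1e8f, c = 1.0f;
  printf("%g vs %g\n", (a + b) + c, a + (b + c));  // prints "1 vs 0"
  return 0;
}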
}
| 8,437 | 25.618297 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c1s1r-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_2f2m2l4c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
float vacc2p0 = w[2];
float vacc3p0 = w[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[4];
const float vk0x1 = w[5];
const float vk0x2 = w[6];
const float vk0x3 = w[7];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[8];
const float vk1x1 = w[9];
const float vk1x2 = w[10];
const float vk1x3 = w[11];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 12;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = w[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[1];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[2];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
w += 3;
b[0] = vacc0p0;
b += 1;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 1);
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 8;
b[0] = vacc0p0;
b[1] = vacc1p0;
b[2] = vacc2p0;
b[3] = vacc3p0;
b += 4;
}
for (; c != 0; c --) {
float vacc0p0 = b[0];
const float vi0x0 = i0[0];
i0 += 1;
const float vk0x0 = w[0];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vi1x0 = i1[0];
i1 += 1;
const float vk1x0 = w[1];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
w += 2;
b[0] = vacc0p0;
b += 1;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float vacc0p0 = b[0];
float vacc1p0 = b[1];
float vacc2p0 = b[2];
float vacc3p0 = b[3];
b += 4;
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
const float vi0x2 = i0[2];
const float vi0x3 = i0[3];
i0 += 4;
const float vk0x0 = w[0];
const float vk0x1 = w[1];
const float vk0x2 = w[2];
const float vk0x3 = w[3];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi0x2, vk0x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi0x3, vk0x3, vacc3p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
const float vi1x2 = i1[2];
const float vi1x3 = i1[3];
i1 += 4;
const float vk1x0 = w[4];
const float vk1x1 = w[5];
const float vk1x2 = w[6];
const float vk1x3 = w[7];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
vacc2p0 = math_muladd_f32(vi1x2, vk1x2, vacc2p0);
vacc3p0 = math_muladd_f32(vi1x3, vk1x3, vacc3p0);
w += 8;
output[0] = vacc0p0;
output[1] = vacc1p0;
output[2] = vacc2p0;
output[3] = vacc3p0;
output += 4;
}
for (; c != 0; c --) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
w += 2;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,877 | 25.614865 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c4s4r-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5595 | 27.845361 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c4s4r-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
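      // Rows that fall into the padding region pass the shared 'zero' buffer;
      // those pointers must not be shifted by input_offset.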
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5170 | 26.801075 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c4s4r-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
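  // Same structure as the plain NEON variant, but vfmaq_f32 issues a fused
  // multiply-add (single rounding) where ARMv8 FMA is available.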
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5598 | 27.860825 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l4c4s4r-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l4c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
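      // Packed weight layout per 4-channel group in this pass: bias vector
      // first, then one vector of taps for each input row.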
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = 0;
for (; c < channels; c += 4) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5173 | 26.817204 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l8c4s4r-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l8c4s4r__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
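      // Channels are rounded up to a multiple of 4 so 'buffer' is always read
      // and written in full vectors; XNN_OOB_READS permits the resulting
      // over-read of the input rows.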
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
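        // Store the 1-3 leftover channels with a 64-bit pair store followed
        // by a single-lane store as needed.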
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9218 | 31.010417 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l8c4s4r-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l8c4s4r__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
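    // Each middle pass reloads the running sums from 'buffer', folds in 2 more
    // taps, and writes them back in place.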
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8310 | 29.667897 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l8c4s4r-minmax-neonfma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l8c4s4r__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
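  // Multipass kernels are generated for kernels with more taps than a single
  // pass covers; the first pass here consumes 2 taps, hence the assert.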
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9221 | 31.020833 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-2f2m2l8c4s4r-minmax-neonfma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_2f2m2l8c4s4r__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 2);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* w = weights;
// First pass to process 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Middle pass to process 2 inputs in each iteration.
for (size_t ks = kernel_size - 2; ks > 2; ks -= 2) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
input += 2;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b);
float32x4_t vacc4567p0 = vld1q_f32(b + 4);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vst1q_f32(b, vacc0123p0); b += 4;
vst1q_f32(b, vacc4567p0); b += 4;
}
if (c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vst1q_f32(b, vacc0123p0); b += 4;
}
}
// Last pass to process up to 2 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
float32x4_t vacc4567p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(b); b += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(b);
const float32x4_t vi0x0123 = vld1q_f32(i0);
float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8313 | 29.678967 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l1c1s1r-minmax-wasm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l1c1s1r__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 3;
*b++ = vacc0p0;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
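        // Clamp using the native wasm f32.max/f32.min instructions.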
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4864 | 27.284884 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l1c1s1r-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l1c1s1r__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
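  // The 3f3m3l variants consume 3 taps in the first, middle, and last passes,
  // so the kernel must have more than 3 taps in total.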
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
*b++ = vacc0p0;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
*b++ = vacc0p0;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4667 | 27.120482 | 75 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l1c1s1r-scalar-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l1c1s1r__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
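  // Portable scalar variant: math_muladd_f32 abstracts over fused and separate
  // multiply-add, and the second accumulator (vacc0p1) shortens the dependency
  // chain between taps.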
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*b++ = vacc0p0;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
w += 3;
*b++ = vacc0p0;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
// Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,666 | 26.779762 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l1c1s1r-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l1c1s1r__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
*b++ = vacc0p0;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
*b++ = vacc0p0;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
for (size_t c = channels; c >= 1; c -= 1) {
float vacc0p0 = *b++;
const float vi0 = *i0++;
const float vk0 = w[0];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[1];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[2];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 3;
*output++ = vacc0p0;
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,469 | 26.592593 | 76 |
c
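A consequence of the first/middle/last split worth spelling out: the number of middle passes and the packed-weight footprint per channel follow directly from kernel_size. The w pointer arithmetic in the files above (w += 4 in the first pass, w += 3 per middle and last pass) implies the last pass always walks a full 3-tap weight block, with absent taps neutralized by indirection rows that point at the zero buffer. A small bookkeeping sketch under that assumption; count_middle_passes and packed_weights_per_channel are hypothetical names, not XNNPACK APIs:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Middle passes of 3 taps each, matching
   for (ks = kernel_size - 3; ks > 3; ks -= 3). */
static size_t count_middle_passes(size_t kernel_size) {
  assert(kernel_size > 3);
  size_t passes = 0;
  for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
    passes++;
  }
  return passes;
}

/* Floats of packed weights walked per channel: {bias + 3 taps} in the
   first pass, 3 per middle pass, and a full 3-tap block in the last pass. */
static size_t packed_weights_per_channel(size_t kernel_size) {
  return 4 + 3 * count_middle_passes(kernel_size) + 3;
}

int main(void) {
  for (size_t ks = 4; ks <= 10; ks++) {
    printf("kernel_size=%zu: %zu middle pass(es), %zu weight floats/channel\n",
           ks, count_middle_passes(ks), packed_weights_per_channel(ks));
  }
  return 0;
}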
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-minmax-wasmsimd-arm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l4c4s4r__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
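  // params->wasmsimd.min/max each hold the scalar bound duplicated into a
  // 64-bit pair, so one load64_splat broadcasts it to all four lanes.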
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
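        // Remainder of 1-3 channels: the loads below are still full 128-bit
        // (the kernel is declared XNN_OOB_READS), but only the first c lanes
        // of the result are written via the 64- and 32-bit lane stores.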
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,017 | 27.072 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l4c4s4r__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,689 | 26.644628 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-minmax-wasmsimd-x86-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l4c4s4r__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
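  // The _x86 variants clamp with wasm_f32x4_pmin/pmax (compare-and-select),
  // which lower to single MINPS/MAXPS instructions on x86; the _arm variants
  // use wasm_f32x4_min/max, which map directly to NEON FMIN/FMAX.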
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,021 | 27.088 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l4c4s4r__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,693 | 26.661157 | 89 |
c
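The only functional difference between the _arm and _x86 minmax variants is the clamp: wasm_f32x4_min/max implement the NaN-propagating WebAssembly semantics (cheap on NEON), while wasm_f32x4_pmin/pmax use the compare-and-select form that matches x86 MINPS/MAXPS. A scalar sketch of the two lane semantics, illustrative only:

#include <math.h>
#include <stdio.h>

/* WebAssembly f32x4.min lane semantics: NaN in either operand yields NaN. */
static float wasm_min_lane(float a, float b) {
  if (isnan(a) || isnan(b)) return nanf("");
  return a < b ? a : b;
}

/* WebAssembly f32x4.pmin lane semantics: b < a ? b : a (MINPS-style), so a
   NaN in operand b is never selected; a NaN in operand a passes through. */
static float wasm_pmin_lane(float a, float b) {
  return b < a ? b : a;
}

int main(void) {
  const float x = nanf("");
  printf("min(NaN, 1)  = %f\n", wasm_min_lane(x, 1.0f));   /* nan */
  printf("pmin(NaN, 1) = %f\n", wasm_pmin_lane(x, 1.0f));  /* nan */
  printf("pmin(1, NaN) = %f\n", wasm_pmin_lane(1.0f, x));  /* 1.000000 */
  return 0;
}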
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-wasmsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l4c4s4r__wasmsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
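        // Non-minmax variant: no clamping, the merged accumulator is stored as-is.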
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
        // Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,741 | 26.631148 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l4c4s4r-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l4c4s4r__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = 0;
for (; c < channels; c += 4) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,413 | 26.177966 | 90 |
c
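One contract detail shared by all of the kernels above: indirection-buffer rows that fall in the implicit padding region point at a shared zero buffer, and the `if XNN_UNPREDICTABLE(iN != zero)` guard applies input_offset only to real rows, leaving the shared zero row untouched. A self-contained miniature of that fix-up; resolve_row is a hypothetical helper, not an XNNPACK function:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Real input rows are stored relative to a movable base and need
   input_offset (in bytes) applied; the shared zero row is absolute. */
static const float* resolve_row(const float* row, const float* zero,
                                size_t input_offset) {
  if (row != zero) {
    row = (const float*) ((uintptr_t) row + input_offset);
  }
  return row;
}

int main(void) {
  static const float zero_row[4] = {0, 0, 0, 0};
  static const float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  /* Pretend the indirection buffer was built for base == data, and the
     current micro-batch starts 4 floats in. */
  const size_t input_offset = 4 * sizeof(float);
  const float* rows[2] = { data, zero_row };
  for (int i = 0; i < 2; i++) {
    const float* r = resolve_row(rows[i], zero_row, input_offset);
    printf("row %d reads %f\n", i, r[0]);  /* 5.000000 then 0.000000 */
  }
  return 0;
}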