repo (string, 1-152 chars, ⌀ nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l8c4s4r__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
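// (Editorial note, inferred from the structure below) The name "3f3m3l8c4s4r"
// appears to decode as: a first pass over 3 kernel taps, middle passes over
// 3 taps each, a last pass over the remaining (up to 3) taps, an 8-channel
// main tile with a 4-channel subtile, and channels rounded up to a multiple
// of 4; "acc2" denotes two accumulator chains per tile. vmin/vmax are
// splatted once here and reused to clamp outputs in the last pass.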
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
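// (Editorial note) Channels are rounded up to a multiple of 4 because the
// scratch buffer is read and written in whole 4-float vectors; the
// XNN_OOB_READS annotation on the function marks the resulting reads past
// the logical end of the rows as intentional.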
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
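// (Editorial note) This is the "acc2" strategy: taps 0 and 2 accumulate into
// the p0 chain while tap 1 starts an independent p1 chain, shortening the
// add-dependency chain; the two chains are merged just below.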
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
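// (Editorial note) Pass accounting: the first pass consumed 3 taps, each
// middle iteration consumes 3 more while over 3 remain, and the last pass
// below handles the final 1-3 taps, so any kernel_size > 3 is covered.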
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
vacc0123 = wasm_f32x4_min(vacc0123, vmax);
vacc4567 = wasm_f32x4_min(vacc4567, vmax);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,078 | 30.131443 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l8c4s4r__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
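// (Editorial note) Unlike the _acc2 sibling above, this variant keeps a
// single accumulator chain per tile: every tap adds directly into p0. The
// "_arm" suffix reflects clamping with wasm_f32x4_max/min, whose IEEE-style
// semantics map cleanly to single instructions on ARM NEON backends.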
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
vacc0123 = wasm_f32x4_min(vacc0123, vmax);
vacc4567 = wasm_f32x4_min(vacc4567, vmax);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,363 | 29.630728 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l8c4s4r__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
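// (Editorial note) The "_x86" variants clamp with wasm_f32x4_pmax/pmin
// (pseudo-min/max) rather than the IEEE-style min/max used by the "_arm"
// variants; these adopt x86 minps/maxps semantics and so compile to one
// SSE instruction each.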
v128_t vacc0123 = wasm_f32x4_pmax(vacc0123p0, vmin);
v128_t vacc4567 = wasm_f32x4_pmax(vacc4567p0, vmin);
vacc0123 = wasm_f32x4_pmin(vacc0123, vmax);
vacc4567 = wasm_f32x4_pmin(vacc4567, vmax);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,086 | 30.152062 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3f3m3l8c4s4r__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
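// (Editorial note) This variant combines the single-accumulator schedule of
// the "_arm" kernel above with the pmax/pmin clamping of the "_x86_acc2"
// kernel: the multipass structure is identical, only the accumulator count
// and the clamp lowering differ.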
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
v128_t vacc0123 = wasm_f32x4_pmax(vacc0123p0, vmin);
v128_t vacc4567 = wasm_f32x4_pmax(vacc4567p0, vmin);
vacc0123 = wasm_f32x4_pmin(vacc0123, vmax);
vacc4567 = wasm_f32x4_pmin(vacc4567, vmax);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
v128_t vacc0 = wasm_f32x4_pmax(vacc0p0, vmin);
vacc0 = wasm_f32x4_pmin(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,371 | 29.652291 | 89 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-wasmsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l8c4s4r__wasmsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
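// (Editorial note) This kernel has no "minmax" in its name and takes
// xnn_f32_default_params: accumulators are stored to the output unclamped,
// which is why there is no vmin/vmax setup here.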
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
const v128_t vacc0 = vacc0p0;
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
v128_t vacc0p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
// Add up all accumulators to vacc0p0
vacc0p0 = wasm_f32x4_add(vacc0p0, vacc0p1);
v128_t vacc0 = vacc0p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,667 | 29.78628 | 90 | c |
XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3f3m3l8c4s4r-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3f3m3l8c4s4r__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
size_t kernel_size,
float* buffer,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 3);
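// (Editorial note) As above, this is the unclamped variant; it also uses a
// single accumulator chain (no "_acc2" suffix), so each tap adds directly
// into the running p0 accumulator.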
do {
const float* w = weights;
// First pass to process 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
// Process c channels and write to buffer.
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 16;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Middle pass to process 3 inputs in each iteration.
for (size_t ks = kernel_size - 3; ks > 3; ks -= 3) {
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input += 3;
size_t c = round_up_po2(channels, 4);
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w);
const v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
const v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 16);
const v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
wasm_v128_store(b, vacc0123p0);
wasm_v128_store(b + 4, vacc4567p0);
b += 8;
}
if (c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
wasm_v128_store(b, vacc0p0);
b += 4;
}
}
// Last pass to process up to 3 inputs.
{
float* b = buffer;
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(b);
v128_t vacc4567p0 = wasm_v128_load(b + 4);
b += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vk0x0123 = wasm_v128_load(w);
v128_t vk0x4567 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vk1x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vk2x0123 = wasm_v128_load(w + 16);
v128_t vk2x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 24;
        v128_t vacc0123 = wasm_f32x4_max(vacc0123p0, vmin);
        v128_t vacc4567 = wasm_f32x4_max(vacc4567p0, vmin);
        vacc0123 = wasm_f32x4_min(vacc0123, vmax);
        vacc4567 = wasm_f32x4_min(vacc4567, vmax);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0p0 = wasm_v128_load(b);
b += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
w += 12;
        v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
        vacc0 = wasm_f32x4_min(vacc0, vmax);
wasm_v128_store(output, vacc0);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0p0 = wasm_v128_load(b);
const v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vk0x0123 = wasm_v128_load(w);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vk1x0123 = wasm_v128_load(w + 4);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vk2x0123 = wasm_v128_load(w + 8);
vacc0p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0p0);
        v128_t vacc0 = wasm_f32x4_max(vacc0p0, vmin);
        vacc0 = wasm_f32x4_min(vacc0, vmax);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0, 0);
vacc0 = wasm_v64x2_shuffle(vacc0, vacc0, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0, 0);
output += 1;
}
}
}
input = (const float**) ((uintptr_t) input + input_stride);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
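The three-pass structure above is easier to see in scalar form: the first pass seeds the buffer with bias plus three taps, each middle pass accumulates three more taps into the buffer, and the last pass adds the remaining taps and applies the min/max clamp. Below is a minimal scalar model of that scheme; the function name is made up, and the flat row-major weights layout is a deliberate simplification of the tiled layout the real kernel reads.

#include <stddef.h>

// Hypothetical scalar model of the first/middle/last pass scheme.
// Assumes weights[0..channels-1] is the bias row and row (k + 1) holds
// the taps for kernel position k; the real kernel uses a tiled layout.
static void dwconv_multipass_ref(
    size_t channels, size_t kernel_size, const float** input,
    const float* weights, float* buffer, float* output,
    float vmin, float vmax)
{
  // First pass: bias plus taps 0..2 go into the buffer.
  for (size_t c = 0; c < channels; c++) {
    float acc = weights[c];
    for (size_t k = 0; k < 3; k++) {
      acc += input[k][c] * weights[(k + 1) * channels + c];
    }
    buffer[c] = acc;
  }
  // Middle passes: three taps at a time, accumulated into the buffer.
  size_t k = 3;
  for (; k + 3 < kernel_size; k += 3) {
    for (size_t c = 0; c < channels; c++) {
      float acc = buffer[c];
      for (size_t j = 0; j < 3; j++) {
        acc += input[k + j][c] * weights[(k + j + 1) * channels + c];
      }
      buffer[c] = acc;
    }
  }
  // Last pass: remaining taps, then the min/max clamp, then the store.
  for (size_t c = 0; c < channels; c++) {
    float acc = buffer[c];
    for (size_t j = k; j < kernel_size; j++) {
      acc += input[j][c] * weights[(j + 1) * channels + c];
    }
    acc = acc < vmin ? vmin : acc;
    acc = acc > vmax ? vmax : acc;
    output[c] = acc;
  }
}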
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-avx-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
w += 64;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
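The tail of the kernel above loads partial vectors through a sliding window into params->avx.mask_table, indexed as [7 - c]. A small sketch of how such a table produces a c-lane mask; the table contents shown are an assumption inferred from that indexing, not copied from the library.

#include <stdint.h>
#include <stddef.h>

// Assumed layout: seven all-ones words followed by seven zero words.
// Reading 8 consecutive words starting at index (7 - c) yields a mask
// whose first c lanes are set, so _mm256_maskload_ps touches exactly
// c floats (1 <= c <= 7).
static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,
};

static const int32_t* tail_mask8(size_t c) {
  return &mask_table[7 - c];  // first c of the 8 words read are -1
}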
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-avx.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
w += 64;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
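Comparing this variant with the _acc2 one above: the only difference is whether the tap-1 product goes through a second accumulator that is folded in at the end. A scalar sketch of why that split helps; it mirrors the fma3 variants below and uses fmaf for the fused steps.

#include <math.h>

// One accumulator: every multiply-add depends on the previous result.
static float dot3_acc1(const float* i, const float* k, float bias) {
  float acc = bias;
  acc = fmaf(i[0], k[0], acc);
  acc = fmaf(i[1], k[1], acc);  // waits on the first fma
  acc = fmaf(i[2], k[2], acc);  // waits on the second
  return acc;
}

// Two accumulators: the middle product is independent of acc0 and can
// issue in parallel; one extra add joins the two chains at the end.
static float dot3_acc2(const float* i, const float* k, float bias) {
  float acc0 = bias;
  acc0 = fmaf(i[0], k[0], acc0);
  const float acc1 = i[1] * k[1];  // independent of acc0
  acc0 = fmaf(i[2], k[2], acc0);
  return acc0 + acc1;
}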
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-avx512f-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 64;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
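Unlike the AVX kernels, the AVX-512 tail builds its lane mask arithmetically: (1 << c) - 1 sets the low c bits. A tiny self-contained check of that expression over the full 1 <= c <= 16 range:

#include <stdint.h>
#include <stdio.h>

static uint16_t tail_mask16(uint32_t c) {
  // c = 5 -> 0x001f (lanes 0..4 enabled); c = 16 -> 0xffff. The shift
  // happens in 32 bits, so c = 16 does not overflow before the cast.
  return (uint16_t) ((UINT32_C(1) << c) - UINT32_C(1));
}

int main(void) {
  for (uint32_t c = 1; c <= 16; c++) {
    printf("c=%2u mask=0x%04x\n", c, tail_mask16(c));
  }
  return 0;
}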
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-avx512f.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 64;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-fma3-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
w += 64;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-fma3.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
w += 64;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-neon-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-neon.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-neonfma-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p16c-minmax-neonfma.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p16c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
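The neon and neonfma variants above differ only in vmlaq_f32 versus vfmaq_f32: the fused form rounds the multiply-add once instead of twice, so results can differ in the last bit. A scalar illustration with fmaf from <math.h> (assuming a typical SSE2/NEON float ABI with no excess precision):

#include <math.h>
#include <stdio.h>

int main(void) {
  const float a = 1.0f + 0x1.0p-12f;
  const float unfused = a * a - 1.0f;       // product rounded, then the add
  const float fused   = fmaf(a, a, -1.0f);  // a*a - 1.0f rounded once
  // The 2^-24 term of a*a is lost to the first rounding in the unfused
  // form; the fused form keeps it.
  printf("unfused=%a\nfused  =%a\n", unfused, fused);
  return 0;
}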
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-minmax-scalar-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
vacc0p0 += vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
XNNPACK
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
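    // Weights are packed per channel as: bias, k0, k1, k2.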
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,085 | 25.405063 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-minmax-wasm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p1c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
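      // Add up all accumulators to vacc0p0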
vacc0p0 += vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,115 | 25.45 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p1c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,103 | 25.632911 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
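      // Add up all accumulators to vacc0p0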
vacc0p0 += vacc0p1;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 1,921 | 24.289474 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p1c-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
w += 4;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 1,909 | 24.466667 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-minmax-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
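    // Weights are packed in pairs of channels: [bias x2, k0 x2, k1 x2, k2 x2].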
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,350 | 25.808 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,207 | 25.733333 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-minmax-wasm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p2c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,408 | 26.272 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_3p2c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,265 | 26.216667 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
// Add up all accumulators to vacc01p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,996 | 25.06087 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p2c-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_3p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
w += 8;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,853 | 24.945455 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p32c-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_3p32c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 32; c -= 32) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
__m512 vaccGHIJKLMNOPQRSTUVp1 = _mm512_mul_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
w += 128;
// Add up all accumulators to vacc0123456789ABCDEFGHIJKLMNOPQRSTUVp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_add_ps(vaccGHIJKLMNOPQRSTUVp0, vaccGHIJKLMNOPQRSTUVp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
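      // Weights are packed in 32-channel groups, so the 16-channel subtile advances w by only 16 while the k0/k1/k2 rows remain at fixed 32-element offsets.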
w += 16;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,790 | 39.664671 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p32c-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_3p32c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 32; c -= 32) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
w += 128;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
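      // Weights are packed in 32-channel groups, so the 16-channel subtile advances w by only 16 while the k0/k1/k2 rows remain at fixed 32-element offsets.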
w += 16;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,292 | 38.33125 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
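    // Weights are packed in groups of 4 channels: [bias x4, k0 x4, k1 x4, k2 x4].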
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,542 | 30.078947 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
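      // Write out 2 lanes and/or 1 lane according to the channel remainder.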
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,335 | 29.327273 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
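      // neonfma variant: accumulate with fused multiply-add (vfmaq_f32) instead of the separate multiply-add vmlaq_f32.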
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,545 | 30.105263 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,338 | 29.354545 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
w += 16;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
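      // Store the low 2 lanes and/or a single lane, depending on the remaining channel count.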
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,521 | 27.868852 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
w += 16;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
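// Illustrative sketch, not from the XNNPACK sources: a scalar reference for
// what every 3-tap kernel in this family computes per output pixel, using
// the packed-weights layout implied by the SIMD loads above
// ([bias x4 | k0 x4 | k1 x4 | k2 x4] per group of 4 channels). `channels`
// is assumed to be padded to a multiple of 4, as that packing requires.
#include <stddef.h>

static void dwconv_3tap_reference(
    size_t channels,            // padded to a multiple of 4
    const float* i0,            // input row for tap 0
    const float* i1,            // input row for tap 1
    const float* i2,            // input row for tap 2
    const float* w,             // packed [bias x4 | k0 x4 | k1 x4 | k2 x4] groups
    float* output,
    float vmin, float vmax)
{
  for (size_t c = 0; c < channels; c++) {
    const float* wg = w + (c / 4) * 16;  // start of this channel's 4-wide group
    const size_t l = c % 4;              // lane within the group
    float acc = wg[l]                    // bias
              + i0[c] * wg[4 + l]        // tap 0
              + i1[c] * wg[8 + l]        // tap 1
              + i2[c] * wg[12 + l];      // tap 2
    acc = acc < vmin ? vmin : acc;       // clamp to [vmin, vmax]
    acc = acc > vmax ? vmax : acc;
    output[c] = acc;
  }
}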
| 3,346 | 27.364407 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmrelaxedsimd-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
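// Note: the "_acc2" suffix on the kernel above means the per-tap products
// are split across two partial accumulators (vacc0123p0 and vacc0123p1)
// that are summed only at the end. This shortens the serial dependency
// chain of additions at the cost of one extra register; it also changes
// the summation order, which is why acc2 and single-accumulator variants
// are generated as separate kernels.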
| 3,775 | 29.699187 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmrelaxedsimd-fma-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 16;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,791 | 29.829268 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 16;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
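// Note: __builtin_wasm_relaxed_madd_f32x4 is the relaxed-SIMD multiply-add
// used by the "_fma" kernels above. Engines may lower it either to a fused
// multiply-add (single rounding) or to separate multiply and add
// instructions, so results can differ slightly between platforms; that
// permitted nondeterminism is what separates these kernels from the plain
// wasm_f32x4_add(wasm_f32x4_mul(...)) variants.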
| 3,622 | 29.445378 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmrelaxedsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
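// Note on the calling convention shared by these kernels, as evidenced by
// the pointer arithmetic: `input` is an indirection buffer holding 3 row
// pointers per output pixel and advances by `input_stride` bytes per pixel;
// row pointers equal to `zero` reference a shared zero buffer (used for
// padded taps) and deliberately skip the `input_offset` adjustment; after
// the channel loop, `output` advances by `output_increment` bytes, letting
// the caller leave gaps between output pixels.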
| 3,600 | 29.260504 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmsimd-arm-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,700 | 29.089431 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmsimd-arm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,525 | 28.630252 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmsimd-x86-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,704 | 29.121951 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-minmax-wasmsimd-x86.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p4c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
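// Note: the "_x86" variants above clamp with wasm_f32x4_pmin/pmax, the
// pseudo-minimum/maximum ops that were designed to map to a single
// MINPS/MAXPS instruction on x86. The IEEE-style wasm_f32x4_min/max used
// by the "_arm" variants lower to single instructions on ARM but require
// extra NaN/signed-zero fix-up sequences on x86. With finite clamp bounds
// both forms produce the same clamped result.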
| 3,529 | 28.663866 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_3p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 16;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,271 | 27.701754 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p4c-wasmsimd.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_3p4c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 16;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
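// Note: the two preceding kernels have no "_minmax_" infix: they take
// xnn_f32_default_params, perform no clamping, and copy the accumulator
// straight to the output vector. They serve the linear-activation path,
// where min/max bounds are not needed.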
| 3,242 | 27.447368 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-avx-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
w += 32;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,216 | 30.94697 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-avx.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
w += 32;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
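// Illustrative sketch, not from the XNNPACK sources: the masked tail load
// used by the AVX kernels above. The indexing &params->avx.mask_table[7 - c]
// implies a 14-entry table of seven all-ones words followed by seven zeros;
// sliding the load window by 7 - c leaves exactly the first c lanes
// enabled. The table below is reconstructed from that indexing, not copied
// from XNNPACK's internal params struct.
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1,  // lanes that may be enabled
   0,  0,  0,  0,  0,  0,  0,  // lanes that stay disabled
};

// Loads c (1..7) floats from p; lanes >= c read as 0.0f, and no memory past
// p[c-1] is dereferenced by the masked load.
static __m256 load_tail_f32(const float* p, size_t c) {
  const __m256i vmask = _mm256_loadu_si256((const __m256i*) &mask_table[7 - c]);
  return _mm256_maskload_ps(p, vmask);
}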
| 4,017 | 30.390625 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-fma3-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
w += 32;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,165 | 30.560606 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-fma3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
w += 32;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 8);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
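// Note: the fma3 kernels above replace the AVX pattern
// _mm256_add_ps(_mm256_mul_ps(vi, vk), vacc) with _mm256_fmadd_ps(vi, vk,
// vacc), which computes vi*vk+vacc with a single rounding. That is faster
// on FMA3-capable CPUs and numerically slightly different from the mul+add
// sequence, which is why AVX and FMA3 versions exist as separate kernels.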
| 3,940 | 29.789063 | 96 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-neon-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
// Add up all accumulators to vacc01234567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,189 | 33.144737 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-neon.c |
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
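// Editor's sketch (not generator output): the (c & 2)/(c & 1) tail above
// stores the last 1-3 channels of a 4-lane vector without writing past the
// end of `output`. The same idea in scalar C, assuming the four clamped
// lanes have been spilled to `vacc[4]`:
#include <stddef.h>
static void store_tail_1to3(float* output, const float vacc[4], size_t c) {
  size_t lane = 0;
  if (c & 2) {
    output[0] = vacc[0];  // store the two low lanes
    output[1] = vacc[1];
    output += 2;
    lane = 2;             // a remaining odd lane now comes from the high half
  }
  if (c & 1) {
    output[0] = vacc[lane];
  }
}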
| 4,823 | 32.268966 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
// Add up all accumulators to vacc01234567p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,192 | 33.164474 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 4);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 20);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 8);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 24);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
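// Editor's note: this neonfma variant uses vfmaq_f32 (one rounding) where the
// plain NEON variant uses vmlaq_f32, which may round the product and the sum
// separately. A scalar model of the difference, assuming C99 fmaf():
#include <math.h>
#pragma STDC FP_CONTRACT OFF
static float fused_minus_unfused(float a, float b, float c) {
  const float unfused = a * b + c;    // two roundings (contraction disabled)
  const float fused = fmaf(a, b, c);  // one rounding, like vfmaq_f32
  return fused - unfused;             // typically 0, occasionally 1 ulp
}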
| 4,826 | 32.289655 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
__m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
w += 32;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
      // Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
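// Editor's sketch of the packed weight layout the loads above assume: per
// 8-channel group, 8 bias values at w[0..7], then 8 values for each of the
// 3 taps at w[8..15], w[16..23], w[24..31] -- which is why the remainder
// loops read w + 8, w + 16, w + 24. Illustration only, not XNNPACK's actual
// packing routine (which also pads partial groups); assumes channels is a
// multiple of 8:
#include <stddef.h>
static void pack_weights_3p8c(float* w, const float* bias,
                              const float* k /* [3][channels], tap-major */,
                              size_t channels) {
  for (size_t c = 0; c < channels; c += 8) {
    for (size_t j = 0; j < 8; j++) *w++ = bias[c + j];
    for (size_t t = 0; t < 3; t++) {
      for (size_t j = 0; j < 8; j++) *w++ = k[t * channels + c + j];
    }
  }
}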
| 5,204 | 29.982143 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
__m128 vacc0123p0 = _mm_load_ps(w);
__m128 vacc4567p0 = _mm_load_ps(w + 4);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
i0 += 8;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
const __m128 vk0x4567 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
i1 += 8;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
const __m128 vk1x4567 = _mm_load_ps(w + 20);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi1x4567, vk1x4567));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
i2 += 8;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
const __m128 vk2x4567 = _mm_load_ps(w + 28);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));
w += 32;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
__m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
vacc4567 = _mm_min_ps(vacc4567, vmax);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
w += 4;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 24);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
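// Editor's note: _mm_max_ps/_mm_min_ps return the second operand when either
// input is NaN, so max(vacc, vmin) followed by min(vacc, vmax) flushes a NaN
// accumulator to vmin. Scalar model of the clamp in the order used above:
static float clamp_like_maxps(float x, float vmin, float vmax) {
  x = (x > vmin) ? x : vmin;  // NaN fails the compare and becomes vmin
  x = (x < vmax) ? x : vmax;
  return x;
}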
| 4,894 | 29.403727 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmrelaxedsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
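// Editor's note: the relaxed min/max built-ins used above may give
// implementation-defined results for NaN and +/-0.0 inputs; for finite
// accumulators they match the strict wasm_f32x4_max/min path of the
// non-relaxed variants, which is what makes them a drop-in here. Hedged
// scalar model of the intended finite-input behaviour:
static float clamp_finite(float x, float vmin, float vmax) {
  return x < vmin ? vmin : (x > vmax ? vmax : x);
}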
| 5,634 | 32.343195 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmrelaxedsimd-fma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
w += 32;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
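// Editor's note: __builtin_wasm_relaxed_madd_f32x4 is allowed to evaluate as
// either a fused multiply-add or a separate multiply and add, at the engine's
// choice. A scalar sketch of the two results it may legitimately produce
// (assumes C99 fmaf()):
#include <math.h>
static void relaxed_madd_outcomes(float a, float b, float c,
                                  float* fused, float* unfused) {
  *fused = fmaf(a, b, c);  // one possible lowering (single rounding)
  *unfused = a * b + c;    // the other (may round twice)
}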
| 5,662 | 32.508876 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
w += 32;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
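// Editor's sketch of the indirection convention shared by all kernels above:
// input[] holds one row pointer per tap, padding taps point at the shared
// `zero` buffer, and input_offset (the per-image byte offset) must only be
// added to real rows -- hence the `if (iN != zero)` guards. Hypothetical
// setup for one output row of a kernel with 3 vertical taps:
#include <stddef.h>
static void fill_indirection_3taps(const float** input, const float* image,
                                   const float* zero, size_t row_stride,
                                   ptrdiff_t first_row, size_t image_rows) {
  for (size_t t = 0; t < 3; t++) {
    const ptrdiff_t r = first_row + (ptrdiff_t) t;
    input[t] = (r < 0 || r >= (ptrdiff_t) image_rows)
        ? zero                                // padded tap
        : image + (size_t) r * row_stride;    // real input row
  }
}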
| 5,364 | 32.117284 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
v128_t vacc4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4567p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
vacc4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
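// Editor's note: between output pixels the kernels advance the indirection
// pointer `input` by input_stride bytes (one batch of tap pointers) and
// `output` by output_increment bytes on top of the `channels` floats already
// written. Sketch of just that outer stepping, kernel body elided:
#include <stddef.h>
#include <stdint.h>
static void step_pointers(const float*** input, float** output,
                          intptr_t input_stride, size_t output_increment) {
  *input = (const float**) ((uintptr_t) *input + input_stride);
  *output = (float*) ((uintptr_t) *output + output_increment);
}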
| 5,324 | 31.87037 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmsimd-arm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmsimd_arm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
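// Editor's note: the wasm tail above stores two lanes with store64_lane and
// then shuffles the high 64 bits down (v64x2_shuffle ..., 1, 1) so a possible
// odd channel is stored from lane 0. Model of that shuffle on a float[4] view:
static void shuffle_high_to_low(float v[4]) {
  v[0] = v[2];  // after the 64-bit shuffle, old lanes 2,3 occupy lanes 0,1
  v[1] = v[3];
}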
| 5,487 | 31.473373 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmsimd_arm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_max(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
vacc4567 = wasm_f32x4_min(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_max(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_min(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
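// Editor's note: the _arm and _x86 suffixes select between wasm_f32x4_max/min
// (strict semantics, cheap to lower on ARM) and wasm_f32x4_pmax/pmin, the
// "pseudo" min/max that lower to a single maxps/minps on x86. The spec
// defines pmax(a, b) as b < a ? a : b; scalar model:
static float pmax_model(float a, float b) {
  return (b < a) ? a : b;  // any NaN makes the compare false, so b is returned
}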
| 5,177 | 30.962963 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmsimd-x86-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmsimd_x86_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
v128_t vacc4567p1 = wasm_f32x4_mul(vi1x4567, vk1x4567);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
// Add up all accumulators to vacc01234567p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
vacc4567p0 = wasm_f32x4_add(vacc4567p0, vacc4567p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
      // Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,495 | 31.52071 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_3p8c__wasmsimd_x86(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
v128_t vacc4567 = wasm_f32x4_pmax(vmin, vacc4567p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
vacc4567 = wasm_f32x4_pmin(vmax, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = wasm_f32x4_pmax(vmin, vacc0123p0);
vacc0123 = wasm_f32x4_pmin(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,185 | 31.012346 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_3p8c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
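      // __builtin_wasm_relaxed_madd_f32x4(a, b, c) computes a*b + c; relaxed SIMD
      // lets the engine choose a fused (single-rounding) or unfused implementation,
      // so the low result bits may differ between platforms.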
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x4567, vk0x4567, vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x4567, vk1x4567, vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
vacc4567p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x4567, vk2x4567, vacc4567p0);
w += 32;
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,811 | 30.045161 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-3p8c-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_ukernel_3p8c__wasmsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 8; c -= 8) {
v128_t vacc0123p0 = wasm_v128_load(w);
v128_t vacc4567p0 = wasm_v128_load(w + 4);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
const v128_t vk0x4567 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vk0x4567), vacc4567p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
const v128_t vk1x4567 = wasm_v128_load(w + 20);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vk1x4567), vacc4567p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
const v128_t vk2x4567 = wasm_v128_load(w + 28);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
vacc4567p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vk2x4567), vacc4567p0);
w += 32;
const v128_t vacc0123 = vacc0123p0;
const v128_t vacc4567 = vacc4567p0;
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
w += 4;
const v128_t vacc0123 = vacc0123p0;
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 24);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
v128_t vacc0123 = vacc0123p0;
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,764 | 29.741935 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-avx-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__avx_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
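      // _acc2 variant: a second accumulator chain (the p1 registers) breaks the
      // serial dependency between consecutive multiply-adds; the chains are summed
      // once after the last tap.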
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp1 = _mm256_add_ps(vacc89ABCDEFp1, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
w += 80;
      // Add up the partial accumulators into vacc01234567p0 and vacc89ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
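      // The mask table is assumed to hold seven all-ones 32-bit words followed by
      // seven zero words; indexing at [7 - c] selects exactly c leading all-ones
      // lanes, e.g. c == 3 keeps lanes 0-2 and zeroes lanes 3-7.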
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_add_ps(vacc01234567p1, _mm256_mul_ps(vi3x01234567, vk3x01234567));
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
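      // Write the 1-7 leftover channels in a 4/2/1 cascade, pulling the upper
      // 128-bit half down with extractf128/movehl as lanes are consumed.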
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,261 | 34.950495 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__avx(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,911 | 34.446154 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
w += 80;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
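      // e.g. c == 5: (1 << 5) - 1 == 0x1F, a __mmask16 enabling only lanes 0-4.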
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,106 | 36.551471 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 80;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 16);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,833 | 35.621212 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-fma3-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__fma3_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
__m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);
w += 80;
      // Add up the partial accumulators into vacc01234567p0 and vacc89ABCDEFp0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
w += 8;
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
__m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
// Add up all accumulators to vacc01234567p0
vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,106 | 34.183168 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-fma3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__fma3(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
__m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
i0 += 16;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
i1 += 16;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi1x89ABCDEF, vk1x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
i2 += 16;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
i3 += 16;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
vacc89ABCDEFp0 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp0);
w += 80;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
__m256 vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEFp0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; c >= 8; c -= 8) {
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
i0 += 8;
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
i1 += 8;
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
i2 += 8;
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
i3 += 8;
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
w += 8;
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 7);
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ¶ms->avx.mask_table[7 - c]);
__m256 vacc01234567p0 = _mm256_load_ps(w);
const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
vacc01234567p0 = _mm256_fmadd_ps(vi1x01234567, vk1x01234567, vacc01234567p0);
const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
vacc01234567p0 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p0);
__m256 vacc01234567 = _mm256_max_ps(vmin, vacc01234567p0);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
__m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
if (c & 4) {
_mm_storeu_ps(output, vacc0123);
vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
output += 4;
}
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 6,704 | 33.384615 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
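      // vmlaq_f32 may be lowered as a separate multiply and add; the neonfma
      // variant of this kernel below uses vfmaq_f32 for a true fused multiply-add.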
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);
      // Add up the partial accumulators into vacc0123p0, vacc4567p0, vacc89ABp0 and vaccCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
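      // Weights are packed as 16 bias values followed by 16 values per tap, so
      // after the bias load (w += 4) each tap sits at a fixed offset (w + 12,
      // w + 28, w + 44, w + 60) while w itself only advances 4 lanes per tile.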
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,000 | 37.839806 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vmlaq_f32(vacc4567p0, vi3x4567, vk3x4567);
vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi3x89AB, vk3x89AB);
vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi3xCDEF, vk3xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,518 | 37.167513 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
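      // Per 16-channel block the weights are packed as bias[16], k0[16],
      // k1[16], k2[16], k3[16] (80 floats), consumed in that order below.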
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
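      // acc2 variant: the tap-1 products seed a second set of accumulators
      // (vacc*p1), splitting the FMA dependency chain in two; the chains are
      // summed after the last tap.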
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
vacc4567p1 = vfmaq_f32(vacc4567p1, vi3x4567, vk3x4567);
vacc89ABp1 = vfmaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
vaccCDEFp1 = vfmaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);
      // Add up the second accumulator chain into vacc0123p0..vaccCDEFp0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,003 | 37.854369 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p16c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p16c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 16; c -= 16) {
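      // Single-accumulator variant: every tap FMAs into the same vacc*p0
      // chain; the _acc2 sibling splits this chain in two for more ILP.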
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi0x4567, vk0x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi1x4567, vk1x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi1x89AB, vk1x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi1xCDEF, vk1xCDEF);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi2x4567, vk2x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
vacc4567p0 = vfmaq_f32(vacc4567p0, vi3x4567, vk3x4567);
vacc89ABp0 = vfmaq_f32(vacc89ABp0, vi3x89AB, vk3x89AB);
vaccCDEFp0 = vfmaq_f32(vaccCDEFp0, vi3xCDEF, vk3xCDEF);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vacc4567 = vminq_f32(vacc4567, vmax);
vacc89AB = vminq_f32(vacc89AB, vmax);
vaccCDEF = vminq_f32(vaccCDEF, vmax);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w + 12);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w + 28);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w + 44);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w + 60);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w);
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w + 16);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w + 32);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w + 48);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w + 64);
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,521 | 37.182741 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-minmax-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
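      // One channel per iteration; the 5 packed weights are
      // [bias, k0, k1, k2, k3]. Taps 1 and 3 accumulate into a separate
      // vacc0p1 chain (acc2) that is merged before the store.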
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
w += 5;
vacc0p0 += vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,371 | 25.651685 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
w += 5;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,359 | 25.818182 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-minmax-wasm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p1c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
w += 5;
vacc0p0 += vacc0p1;
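      // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower to the
      // WebAssembly f32.max/f32.min instructions for the clamping below.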
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,389 | 25.853933 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p1c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
w += 5;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,377 | 26.022727 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_4p1c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
w += 5;
vacc0p0 += vacc0p1;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,195 | 24.835294 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p1c-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_4p1c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
do {
float vacc0p0 = w[0];
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[2];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[3];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[4];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
w += 5;
*output++ = vacc0p0;
} while (--c != 0);
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 2,183 | 25 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-minmax-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
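      // Two channels per iteration; weights are interleaved per 2-channel
      // group as [bias x2, k0 x2, k1 x2, k2 x2, k3 x2], i.e. the 10 floats
      // consumed by w += 10.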
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
w += 10;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
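    // 1-channel tail: after the bias is consumed via *w++, the remaining
    // channel's taps sit at the interleaved odd offsets w[1], w[3], w[5], w[7].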
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,883 | 26.352113 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
w += 10;
float vacc0 = math_max_f32(vacc0p0, vmin);
float vacc1 = math_max_f32(vacc1p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
float vacc0 = math_max_f32(vacc0p0, vmin);
vacc0 = math_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,740 | 26.306569 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-minmax-wasm-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p2c__wasm_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
w += 10;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,941 | 26.760563 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_minmax_ukernel_4p2c__wasm(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
w += 10;
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
float vacc1 = __builtin_wasm_max_f32(vacc1p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
float vacc0 = __builtin_wasm_max_f32(vacc0p0, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
*output++ = vacc0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,798 | 26.729927 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-scalar-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_4p2c__scalar_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
float vacc0p1 = vi1x0 * vk1x0;
const float vk1x1 = w[5];
float vacc1p1 = vi1x1 * vk1x1;
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p1 = math_muladd_f32(vi3x0, vk3x0, vacc0p1);
const float vk3x1 = w[9];
vacc1p1 = math_muladd_f32(vi3x1, vk3x1, vacc1p1);
w += 10;
      // Add up all accumulators to vacc0p0 and vacc1p0
vacc0p0 = vacc0p0 + vacc0p1;
vacc1p0 = vacc1p0 + vacc1p1;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
float vacc0p1 = vi1 * vk1;
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p1 = math_muladd_f32(vi3, vk3, vacc0p1);
      // Add up all accumulators to vacc0p0
vacc0p0 = vacc0p0 + vacc0p1;
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,529 | 25.742424 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p2c-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_f32_dwconv_ukernel_4p2c__scalar(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 2; c -= 2) {
float vacc0p0 = w[0];
float vacc1p0 = w[1];
const float vi0x0 = i0[0];
const float vi0x1 = i0[1];
i0 += 2;
const float vk0x0 = w[2];
vacc0p0 = math_muladd_f32(vi0x0, vk0x0, vacc0p0);
const float vk0x1 = w[3];
vacc1p0 = math_muladd_f32(vi0x1, vk0x1, vacc1p0);
const float vi1x0 = i1[0];
const float vi1x1 = i1[1];
i1 += 2;
const float vk1x0 = w[4];
vacc0p0 = math_muladd_f32(vi1x0, vk1x0, vacc0p0);
const float vk1x1 = w[5];
vacc1p0 = math_muladd_f32(vi1x1, vk1x1, vacc1p0);
const float vi2x0 = i2[0];
const float vi2x1 = i2[1];
i2 += 2;
const float vk2x0 = w[6];
vacc0p0 = math_muladd_f32(vi2x0, vk2x0, vacc0p0);
const float vk2x1 = w[7];
vacc1p0 = math_muladd_f32(vi2x1, vk2x1, vacc1p0);
const float vi3x0 = i3[0];
const float vi3x1 = i3[1];
i3 += 2;
const float vk3x0 = w[8];
vacc0p0 = math_muladd_f32(vi3x0, vk3x0, vacc0p0);
const float vk3x1 = w[9];
vacc1p0 = math_muladd_f32(vi3x1, vk3x1, vacc1p0);
w += 10;
output[0] = vacc0p0;
output[1] = vacc1p0;
output += 2;
}
for (; c >= 1; c -= 1) {
float vacc0p0 = *w++;
const float vi0 = *i0++;
const float vk0 = w[1];
vacc0p0 = math_muladd_f32(vi0, vk0, vacc0p0);
const float vi1 = *i1++;
const float vk1 = w[3];
vacc0p0 = math_muladd_f32(vi1, vk1, vacc0p0);
const float vi2 = *i2++;
const float vk2 = w[5];
vacc0p0 = math_muladd_f32(vi2, vk2, vacc0p0);
const float vi3 = *i3++;
const float vk3 = w[7];
vacc0p0 = math_muladd_f32(vi3, vk3, vacc0p0);
*output++ = vacc0p0;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 3,386 | 25.669291 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p32c-minmax-avx512f-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_4p32c__avx512f_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 32; c -= 32) {
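      // Per 32-channel block: bias[32] at w, k0[32] at w + 32, k1[32] at
      // w + 64, k2[32] at w + 96, k3[32] at w + 128 (160 floats in total,
      // matching the final w += 160).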
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
__m512 vaccGHIJKLMNOPQRSTUVp1 = _mm512_mul_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
const __m512 vi3xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
const __m512 vk3xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp1 = _mm512_fmadd_ps(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp1);
w += 160;
      // Add up the second accumulator chain into vacc0123456789ABCDEFp0 and vaccGHIJKLMNOPQRSTUVp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_add_ps(vaccGHIJKLMNOPQRSTUVp0, vaccGHIJKLMNOPQRSTUVp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
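    // 16-channel tail: only the 16 consumed biases advance w, while the tap
    // loads keep their +32/+64/+96/+128 offsets into the same 32-channel
    // weight block, so the masked tail below reads the correct second halves.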
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
w += 16;
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
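      // _mm512_maskz_loadu_ps zeroes the masked-out lanes, so the FMAs below
      // are well-defined in all 16 lanes; the masked store writes only the
      // c valid outputs.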
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
__m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);
// Add up all accumulators to vacc0123456789ABCDEFp0
vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,003 | 40.905759 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p32c-minmax-avx512f.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_dwconv_minmax_ukernel_4p32c__avx512f(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 32; c -= 32) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
__m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
i0 += 32;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
i1 += 32;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
i2 += 32;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
const __m512 vi3xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i3 + 16);
i3 += 32;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
const __m512 vk3xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 144);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);
w += 160;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUVp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; c >= 16; c -= 16) {
__m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
i0 += 16;
const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
i1 += 16;
const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
i2 += 16;
const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
i3 += 16;
const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
w += 16;
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
assert(c >= 1);
assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));
__m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);
const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
__m512 vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEFp0);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
output += c;
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
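The remainder path above builds its lane mask as (1 << c) - 1 so that exactly the first c of 16 lanes load, accumulate, and store. Pulled out as a stand-alone helper (a sketch; tail_mask16 is a hypothetical name, and as in the kernel it relies on the AVX-512 _cvtu32_mask16 intrinsic, which some toolchains need a polyfill for):

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Build a 16-lane mask with the low c bits set: lane i participates iff
// i < c. The shift is done in 32-bit arithmetic so c == 16 stays defined.
__mmask16 tail_mask16(size_t c) {
  return _cvtu32_mask16((uint32_t) ((UINT32_C(1) << c) - UINT32_C(1)));
}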
| 7,505 | 39.793478 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-neon-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__neon_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
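All kernels in this family compute the same per-pixel result; only the vectorization differs. A plain scalar model of the 4-tap depthwise computation (a sketch under an assumed flat tap-major weight layout, not the tiled layout the SIMD kernels actually consume):

#include <stddef.h>

// Scalar model of the 4-tap depthwise microkernel for one output pixel:
// `channels` outputs, each clamped to [min, max]. Illustrative only; a
// flat tap-major weight layout k[4][channels] is assumed.
void dwconv_4tap_ref(
    size_t channels,
    const float* i0, const float* i1, const float* i2, const float* i3,
    const float* bias, const float* k,
    float min, float max, float* output)
{
  for (size_t c = 0; c < channels; c++) {
    float acc = bias[c]
        + i0[c] * k[0 * channels + c]
        + i1[c] * k[1 * channels + c]
        + i2[c] * k[2 * channels + c]
        + i3[c] * k[3 * channels + c];
    acc = acc < min ? min : acc;   // lower clamp, like the vmax ops above
    acc = acc > max ? max : acc;   // upper clamp, like the vmin ops above
    output[c] = acc;
  }
}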
| 4,051 | 30.905512 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-neon.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__neon(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vmlaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
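The NEON tail above writes a partial vector by decomposing c into its binary digits: a 2-lane half-store if bit 1 is set, then a single-lane store if bit 0 is set. The same decomposition in scalar form (sketch, hypothetical helper name):

#include <stddef.h>
#include <string.h>

// Write the first c (1..3) floats of a 4-float vector to out, mirroring
// the (c & 2) / (c & 1) store sequence of the NEON tail.
void store_tail(const float v[4], size_t c, float* out) {
  size_t pos = 0;
  if (c & 2) {
    memcpy(out, v, 2 * sizeof(float));  // vst1_f32 of the low half
    out += 2;
    pos = 2;                            // next lane comes from the high half
  }
  if (c & 1) {
    *out = v[pos];                      // vst1_lane_f32, lane 0 of that half
  }
}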
| 3,844 | 30.260163 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-neonfma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__neonfma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);
// Add up all accumulators to vacc0123p0
vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
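The only difference between the neon and neonfma variants is vmlaq_f32 versus vfmaq_f32: the former rounds the product before the add (two roundings), the latter fuses them (one rounding), so results can differ in the last bit. A small portable demonstration using fmaf() to model the fused form (build with contraction disabled, e.g. -ffp-contract=off, so the first expression really takes two roundings):

#include <math.h>
#include <stdio.h>

int main(void) {
  volatile float a = 1.0f + 0x1.0p-12f;   // 1 + 2^-12, exactly representable
  volatile float c = -1.0f - 0x1.0p-11f;  // -(1 + 2^-11), exactly representable
  // Two roundings: a*a = 1 + 2^-11 + 2^-24 rounds (ties-to-even) to 1 + 2^-11,
  // so the add cancels to zero.
  float r_mla = a * a + c;
  // One rounding: the exact product survives into the add.
  float r_fma = fmaf(a, a, c);
  printf("mla: %a\n", r_mla);  // 0x0p+0
  printf("fma: %a\n", r_fma);  // 0x1p-24
  return 0;
}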
| 4,054 | 30.929134 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__neonfma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(c != 0) {
float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
const float32x4_t vi0x0123 = vld1q_f32(i0);
const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);
const float32x4_t vi1x0123 = vld1q_f32(i1);
const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
const float32x4_t vi2x0123 = vld1q_f32(i2);
const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);
const float32x4_t vi3x0123 = vld1q_f32(i3);
const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123);
float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
vacc0123 = vminq_f32(vacc0123, vmax);
float32x2_t vacc01 = vget_low_f32(vacc0123);
if (c & 2) {
vst1_f32(output, vacc01); output += 2;
vacc01 = vget_high_f32(vacc0123);
}
if (c & 1) {
vst1_lane_f32(output, vacc01, 0); output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
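The i0 != zero checks at the top of every kernel encode a driver-side convention: taps that fall in padding point at a shared zero buffer, are recognized by pointer identity, and must not be rebased by input_offset. A hedged sketch of how a caller might fill one indirection row under that convention (fill_indirection_row and its parameters are illustrative, not XNNPACK API):

#include <stddef.h>

// Fill one row of the indirection buffer: padding taps alias the shared
// zero vector, real taps are stored relative to the image base and get
// rebased by input_offset at kernel entry.
void fill_indirection_row(
    const float** indirection,   // [kernel_size] pointers for one output pixel
    const float* zero,           // shared zero buffer for padding taps
    const float* image_base,     // start of the (assumed) input image
    const ptrdiff_t* tap_offset, // [kernel_size] element offsets, <0 => padding
    size_t kernel_size)
{
  for (size_t k = 0; k < kernel_size; k++) {
    indirection[k] = (tap_offset[k] < 0) ? zero : image_base + tap_offset[k];
  }
}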
| 3,847 | 30.284553 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-sse-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__sse_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
__m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
// Add up all accumulators to vacc0123p0
vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
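The w + 4 / w + 8 / w + 12 / w + 16 offsets and the w += 20 step above imply a per-tile weight layout of 4 bias values followed by 4 weights for each of the 4 taps, 20 floats per 4-channel tile. A hypothetical packer producing that layout (the tap-major source layout is an assumption):

#include <stddef.h>

// Pack one 4-channel tile as [bias(4) | k0(4) | k1(4) | k2(4) | k3(4)],
// 20 floats total -- which is exactly why the main loop advances w by 20.
void pack_dwconv_tile4(
    const float* bias,      // [4]
    const float* k,         // [4][channels], tap-major (assumed)
    size_t channels,        // stride between taps in k
    float* packed)          // [20] output tile
{
  for (size_t c = 0; c < 4; c++) {
    packed[c] = bias[c];                               // w[0..3]
  }
  for (size_t tap = 0; tap < 4; tap++) {
    for (size_t c = 0; c < 4; c++) {
      packed[4 + tap * 4 + c] = k[tap * channels + c]; // w[4 + 4*tap + c]
    }
  }
}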
| 4,047 | 28.547445 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-sse.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__sse(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
const __m128 vmin = _mm_load_ps(params->sse.min);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
w += 20;
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
__m128 vacc0123p0 = _mm_load_ps(w);
const __m128 vi0x0123 = _mm_loadu_ps(i0);
const __m128 vk0x0123 = _mm_load_ps(w + 4);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
const __m128 vi1x0123 = _mm_loadu_ps(i1);
const __m128 vk1x0123 = _mm_load_ps(w + 8);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
const __m128 vi2x0123 = _mm_loadu_ps(i2);
const __m128 vk2x0123 = _mm_load_ps(w + 12);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
const __m128 vi3x0123 = _mm_loadu_ps(i3);
const __m128 vk3x0123 = _mm_load_ps(w + 16);
vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi3x0123, vk3x0123));
__m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
vacc0123 = _mm_min_ps(vacc0123, vmax);
if (c & 2) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (c & 1) {
_mm_store_ss(output, vacc0123);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
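These kernels are annotated XNN_OOB_READS because the remainder path issues full 4-lane loads (the unguarded _mm_loadu_ps above) even when fewer than 4 valid floats remain; the extra lanes are discarded by the partial stores, but the bytes must be readable. A sketch of an allocation carrying that slack (the 16-byte figure is an assumption tied to the 128-bit loads here; XNNPACK handles this in its own allocator):

#include <stdlib.h>

// Allocate n floats plus one extra 16-byte vector of readable slack past
// the logical end, so a full-vector load at the tail stays in bounds.
float* alloc_with_oob_slack(size_t n) {
  return (float*) malloc(n * sizeof(float) + 16);
}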
| 3,872 | 28.120301 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmrelaxedsimd-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmrelaxedsimd_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p1);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
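The relaxed min/max builtins used above trade fully specified NaN and signed-zero behavior for speed: an engine may return either operand in those edge cases. For comparison, a clamp built from the strict wasm SIMD ops, which are NaN-propagating by specification (sketch):

#include <wasm_simd128.h>

// Deterministic clamp using the strict f32x4 min/max: unlike the relaxed
// forms, these have fully specified results for NaN and -0.0 inputs.
v128_t clamp_f32x4(v128_t v, v128_t vmin, v128_t vmax) {
  v = wasm_f32x4_max(v, vmin);
  v = wasm_f32x4_min(v, vmax);
  return v;
}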
| 4,327 | 30.362319 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmrelaxedsimd-fma-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmrelaxedsimd_fma_acc2(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
w += 20;
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p1 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p1);
// Add up all accumulators to vacc0123p0
vacc0123p0 = wasm_f32x4_add(vacc0123p0, vacc0123p1);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
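__builtin_wasm_relaxed_madd_f32x4, used by the _fma variants, lets the engine lower each step either to a true fused multiply-add (one rounding) or to multiply-then-add (two roundings), whichever is faster on the host. A strict fallback with fixed two-rounding semantics, for comparison (sketch):

#include <wasm_simd128.h>

// Multiply-then-add with guaranteed two-rounding behavior: this is the
// exact expression the non-fma variants of these kernels use.
v128_t madd_strict(v128_t a, v128_t b, v128_t acc) {
  return wasm_f32x4_add(wasm_f32x4_mul(a, b), acc);
}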
| 4,349 | 30.521739 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmrelaxedsimd_fma(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
w += 20;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi0x0123, vk0x0123, vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi1x0123, vk1x0123, vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi2x0123, vk2x0123, vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = __builtin_wasm_relaxed_madd_f32x4(vi3x0123, vk3x0123, vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,180 | 30.201493 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-dwconv/gen/f32-dwconv-4p4c-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/dwconv.h>
void xnn_f32_dwconv_minmax_ukernel_4p4c__wasmrelaxedsimd(
size_t channels,
size_t output_width,
const float** input,
const float* weights,
float* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const float* zero,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
const float* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const float*) ((uintptr_t) i0 + input_offset);
}
const float* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const float*) ((uintptr_t) i1 + input_offset);
}
const float* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const float*) ((uintptr_t) i2 + input_offset);
}
const float* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const float*) ((uintptr_t) i3 + input_offset);
}
input = (const float**) ((uintptr_t) input + input_stride);
size_t c = channels;
const float* w = weights;
for (; c >= 4; c -= 4) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
w += 20;
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
wasm_v128_store(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(c != 0) {
v128_t vacc0123p0 = wasm_v128_load(w);
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vk0x0123 = wasm_v128_load(w + 4);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vk0x0123), vacc0123p0);
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vk1x0123 = wasm_v128_load(w + 8);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vk1x0123), vacc0123p0);
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vk2x0123 = wasm_v128_load(w + 12);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vk2x0123), vacc0123p0);
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vk3x0123 = wasm_v128_load(w + 16);
vacc0123p0 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vk3x0123), vacc0123p0);
v128_t vacc0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0123p0);
vacc0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0123);
if (c & 2) {
wasm_v128_store64_lane(output, vacc0123, 0);
vacc0123 = wasm_v64x2_shuffle(vacc0123, vacc0123, 1, 1);
output += 2;
}
if (c & 1) {
wasm_v128_store32_lane(output, vacc0123, 0);
output += 1;
}
}
output = (float*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
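Tying the pieces together, a hedged scalar model of the outer loop every unipass kernel above shares: rebase non-padding rows by input_offset, advance the indirection cursor by input_stride, emit the clamped channel outputs, then step output by output_increment. The per-pixel math reuses the dwconv_4tap_ref routine sketched after the neon_acc2 kernel, with the same flat-weight-layout assumption:

#include <stddef.h>
#include <stdint.h>

// Per-pixel computation as sketched earlier in this section.
void dwconv_4tap_ref(
    size_t channels,
    const float* i0, const float* i1, const float* i2, const float* i3,
    const float* bias, const float* k,
    float min, float max, float* output);

// Hedged scalar model of the shared unipass outer loop: pointer rebasing,
// indirection-cursor stepping, and output stepping.
void dwconv_4tap_unipass_ref(
    size_t channels, size_t output_width,
    const float** input, const float* bias, const float* k,
    float* output, intptr_t input_stride, size_t output_increment,
    size_t input_offset, const float* zero, float vmin, float vmax)
{
  do {
    const float* i[4];
    for (size_t t = 0; t < 4; t++) {
      const float* p = input[t];
      // Padding rows alias the shared zero buffer and are not rebased.
      i[t] = (p != zero) ? (const float*) ((uintptr_t) p + input_offset) : p;
    }
    input = (const float**) ((uintptr_t) input + input_stride);
    dwconv_4tap_ref(channels, i[0], i[1], i[2], i[3],
                    bias, k, vmin, vmax, output);
    output += channels;
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}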
| 4,152 | 29.992537 | 89 |
c
|